gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
'''
Roulette
========
:class:`Roulette` provides a rolling way of selecting values like in iOS
and android date pickers.
Dependencies
------------
*. the garden package ``kivy.garden.tickline``. Use ``garden install tickline``
to install it like any other garden package.
*. the garden package ``kivy.garden.roulettescroll``.
Usage
-----
It's simple to use. To give a choice from 0, 1, ..., 9, use::
CyclicRoulette(cycle=10, zero_indexed=True)
Or if we need to select a year, with the default value being the current one,
we can use::
year_roulette = Roulette()
year_roulette.select_and_center(datetime.now().year)
:class:`CyclicRoulette` inherits from :class:`Roulette`, so any setting
pertaining to :class:`Roulette` also applies to :class:`CyclicRoulette`.
If the values need to be formatted, pass the desired format spec string to
:attr:`Roulette.format_str`, like so::
    CyclicRoulette(cycle=60, zero_indexed=True, format_str='{:02d}')
This configuration is commonly used for time display, so there's a convenience
class :class:`TimeFormatCyclicRoulette` for it, with ``zero_indexed=True``
and ``format_str='{:02d}'``.
:attr:`Roulette.density` controls how many values are displayed. To show
3 values at a time, pass ``density=3``. Fractional values will partially
hide values on the edges.
Here's a complete working example with all of the concepts above, a
primitive datetime selector::
if __name__ == '__main__':
from kivy.base import runTouchApp
from kivy.uix.boxlayout import BoxLayout
from kivy.garden.roulette import Roulette, CyclicRoulette, \
TimeFormatCyclicRoulette
b = BoxLayout()
b.add_widget(Roulette(density=2.8, selected_value=2013))
b.add_widget(CyclicRoulette(cycle=12, density=2.8, zero_indexed=False))
b.add_widget(CyclicRoulette(cycle=30, density=2.8, zero_indexed=False))
b.add_widget(TimeFormatCyclicRoulette(cycle=24))
b.add_widget(TimeFormatCyclicRoulette(cycle=60))
b.add_widget(TimeFormatCyclicRoulette(cycle=60))
runTouchApp(b)
:attr:`Roulette.selected_value` contains the current selection. When the
roulette is still, this is the number at the center. If the roulette is
moving, this is the last number centered on before the roulette started
moving.
If you need more real-time information on the value selection, you may
consult :attr:`Roulette.rolling_value`. This is the value that would be selected
if the roulette were to stop right now. So if the roulette is not moving,
then :attr:`Roulette.rolling_value` is equal to :attr:`Roulette.selected_value`.
Otherwise, they are expected to be different. Note, however, that this
value is not stable under widget resizing, unlike ``selected_value``.
To center the roulette, you can call :meth:`Roulette.center_on`. This method
performs an animation to center on the desired value. It does NOT change the
:attr:`~Roulette.selected_value`. The method mentioned above,
:attr:`~Roulette.select_and_center`, on the other hand, does change
the selected value.
To integrate the roulette animations with other UI elements, it may be necessary
to specially handle the :meth:`Roulette.center_on` animation. The event
:meth:`Roulette.on_center` can be listened for. It signals the completion
of the ``center_on`` animation.
NICER GRAPHICS!
---------------
I didn't focus much on the graphics, or on closely simulating the iOS or
android experience. You are encouraged to contribute to improve the default
appearance
of the roulette!
.. versionchanged:: 0.1.1
a background image can be added by giving the path
to :attr:`Roulette.background_image`.
Extending
---------
:class:`Roulette` inherits from :class:`kivy.garden.tickline.Tickline`, and
as such, uses its system of tickline, tick, and labeller. Hence extensive
customizations may be done by extending :class:`Slot` and :class:`CyclicSlot`,
the default tick classes of respectively :class:`Roulette` and
:class:`CyclicRoulette`, and :class:`SlotLabeller`, the default labeller class
of :class:`Roulette`.
'''
__version__ = '0.1.1'
from kivy.animation import Animation
from kivy.clock import Clock
from kivy.core.text import Label as CoreLabel
from kivy.garden.roulettescroll import RouletteScrollEffect
from kivy.garden.tickline import Tick, Tickline, TickLabeller
from kivy.graphics.vertex_instructions import Rectangle
from kivy.lang import Builder
from kivy.metrics import sp, dp
from kivy.properties import ListProperty, ObjectProperty, AliasProperty, \
NumericProperty, BooleanProperty, StringProperty, OptionProperty
from kivy.graphics.vertex_instructions import BorderImage
from os.path import join, dirname
from time import strftime
import datetime
def get_str_date(index, strftime):
    '''Return a human-friendly date string for ``index`` days from today.

    Offsets of -1, 0 and 1 yield 'Yesterday', 'Today' and 'Tomorrow';
    any other offset is formatted with the ``strftime`` format string.
    '''
    if -1 <= index <= 1:
        return ('Yesterday', 'Today', 'Tomorrow')[index + 1]
    target_date = datetime.datetime.now() + datetime.timedelta(days=index)
    return target_date.strftime(strftime)
class SlotLabeller(TickLabeller):
    '''Labeller for :class:`Roulette`: renders one label texture per visible
    slot and keeps the corresponding canvas instructions in sync across
    redraw passes via :meth:`re_init`, :meth:`register` and
    :meth:`make_labels`.'''

    def __init__(self, tickline):
        # maps tick index -> Rectangle instruction currently on the canvas
        self.instructions = {}
        self.re_init()
        self.tickline = tickline

    def re_init(self):
        '''Reset bookkeeping for a new layout pass.'''
        # indices whose instructions will be removed unless re-registered
        self.to_pop = set(self.instructions)
        # indices newly registered this pass whose instructions must be added
        self.to_push = []

    def register(self, tick, tick_index, tick_info):
        '''Create or reuse the label texture for ``tick`` at ``tick_index``
        and center it on the slot described by ``tick_info`` (presumably an
        (x, y, width, height) tuple — TODO confirm against Tickline).'''
        tickline = self.tickline
        if tick_index not in self.instructions:
            # first time this slot is visible: build its texture
            self.to_push.append(tick_index)
            texture = tick.get_label_texture(tick_index)
        else:
            # still visible: keep its instruction and reuse the texture
            self.to_pop.remove(tick_index)
            texture = self.instructions[tick_index].texture
        if texture:
            if tickline.is_vertical():
                tick_pos = tick_info[1] + tick_info[3] / 2
                pos = (tickline.center_x - texture.width / 2,
                       tick_pos - texture.height / 2)
            else:
                tick_pos = tick_info[0] + tick_info[2] / 2
                pos = (tick_pos - texture.width / 2,
                       tickline.center_y - texture.height / 2)
            # only need to update the position if label is saved
            # NOTE(review): self.group_id is presumably provided by the
            # TickLabeller base class — confirm.
            self.instructions.setdefault(tick_index,
                Rectangle(texture=texture, size=texture.size,
                          group=self.group_id)).pos = pos

    def make_labels(self):
        '''Flush pending additions/removals to the tickline's canvas.'''
        canvas = self.tickline.canvas
        for index in self.to_push:
            rect = self.instructions[index]
            canvas.add(rect)
        for index in self.to_pop:
            rect = self.instructions.pop(index)
            canvas.remove(rect)
#===============================================================================
# Slots
#===============================================================================
class Slot(Tick):
    '''A tick representing a single selectable slot of a :class:`Roulette`.

    Converts between tick indices and selection values and renders the
    label texture for each slot.
    '''

    tick_size = ListProperty([0, 0])
    font_size = NumericProperty('20sp')
    int_valued = BooleanProperty(True)
    format_str = StringProperty('{}')
    rollinglist = ListProperty([])
    invert = BooleanProperty(False)
    strftime = StringProperty('')

    def value_str(self, value):
        '''Render ``value`` through :attr:`format_str`.'''
        return self.format_str.format(value)

    def slot_value(self, index, *args, **kw):
        '''Return the selection value corresponding to ``index``.
        Should be overridden if necessary.'''
        return int(round(index)) if self.int_valued else index

    def index_of(self, val, *args, **kw):
        '''Return the index corresponding to a selection value ``val``.
        Should be overridden if necessary.'''
        return val

    def get_label_texture(self, index, **kw):
        '''Build and return the label texture for the slot at ``index``.

        Text source priority: :attr:`strftime` date formatting, then
        :attr:`rollinglist` lookup, then plain :attr:`format_str` output.
        '''
        if self.invert:
            index = -index
        if self.strftime:
            text = get_str_date(index, self.strftime)
        elif len(self.rollinglist):
            text = self.rollinglist[self.slot_value(index)]
        else:
            text = self.value_str(self.slot_value(index))
        label = CoreLabel(text=text, font_size=self.font_size, **kw)
        label.refresh()
        return label.texture
class CyclicSlot(Slot):
    '''A :class:`Slot` whose values repeat with a fixed :attr:`cycle`.'''

    # number of distinct values before wrapping around
    cycle = NumericProperty(10)
    # if True values run 0..cycle-1, otherwise 1..cycle
    zero_indexed = BooleanProperty(False)

    def get_first_value(self):
        return 0 if self.zero_indexed else 1

    def set_first_value(self, val):
        # setting first_value to 0 turns zero-indexing on; 1 turns it off
        self.zero_indexed = not val

    first_value = AliasProperty(get_first_value, set_first_value, cache=True,
                                bind=['zero_indexed'])
    '''provides a default value.'''

    def slot_value(self, index):
        '''Map an arbitrary ``index`` onto the cyclic value range.

        Note: ``self.zero_indexed`` is a bool used as 0/1 in the
        arithmetic below.
        '''
        cycle = self.cycle
        val = index % cycle + 1 - self.zero_indexed
        val = Slot.slot_value(self, val)
        # rounding in Slot.slot_value can push val past the top of the
        # legal range; wrap it back down by one full cycle if so
        if val >= cycle + 1 - self.zero_indexed:
            val -= cycle
        return val

    def index_of(self, val, current_index, *args, **kw):
        '''returns the closest index to ``current_index`` that would correspond
        to ``val``. All indices should be localized.

        :raises ValueError: if ``val`` is outside the legal range
            ``[1 - zero_indexed, cycle - zero_indexed]``.
        '''
        if self.int_valued:
            val = int(round(val))
        zero_indexed = self.zero_indexed
        cycle = self.cycle
        if not (1 - zero_indexed) <= val <= cycle - zero_indexed:
            raise ValueError('value must be between {} and {}; {} is given'.
                             format(1 - zero_indexed, cycle - zero_indexed, val))
        # index of ``val`` within the cycle containing index 0
        base_index = val - 1 + self.zero_indexed
        # choose the cycle repetition nearest to current_index
        n = round((current_index - base_index) / cycle)
        index = n * cycle + base_index
        return index
#===============================================================================
# Roulettes
#===============================================================================
class Roulette(Tickline):
    '''A :class:`Tickline` specialized to roll over and select a single
    value, iOS/android date-picker style. Dispatches ``on_centered`` when
    a :meth:`center_on` animation completes.'''

    __events__ = ('on_centered',)
    #===========================================================================
    # overrides
    #===========================================================================
    background_image = StringProperty(
        join(dirname(__file__), 'roulettebackground.png'),
        allownone=True)
    '''background image, overriding the default of None in :class:`Tickline`.
    .. versionadded:: 0.1.1
    '''
    background_color = ListProperty([1, 1, 1, 1])
    '''background color, defaulting to [1, 1, 1, 1], overriding default
    of [0, 0, 0] in :class:`Tickline`.
    .. versionadded:: 0.1.1
    '''
    cover_background = BooleanProperty(False)
    '''determines whether to draw a Rectangle covering the background.
    Overriding :class:`Tickline` default to give False.
    .. versionadded:: 0.1.1
    '''
    size_hint_x = NumericProperty(None, allownone=True)
    labeller_cls = ObjectProperty(SlotLabeller)
    zoomable = BooleanProperty(False)
    draw_line = BooleanProperty(False)
    font_size = NumericProperty('20sp')
    width = NumericProperty('60dp')
    # doesn't make sense to have more than 1 tick
    tick = ObjectProperty(None)

    def get_ticks(self):
        # adapt the single-tick model to Tickline's list-of-ticks interface
        if self.tick:
            return [self.tick]
        else:
            return []

    def set_ticks(self, val):
        # a roulette has exactly one tick; only the first entry is kept
        self.tick = val[0]

    ticks = AliasProperty(get_ticks, set_ticks, bind=['tick'])
    #===========================================================================
    # public attributes
    #===========================================================================
    rollinglist = ListProperty([])
    invert = BooleanProperty(False)
    strftime = StringProperty('')
    selected_value = ObjectProperty(None)
    '''the currently selected value.'''
    format_str = StringProperty('{}')
    '''formatting spec string for the values displayed.'''
    tick_cls = ObjectProperty(Slot)
    '''The class of the tick in this roulette. Defaults to
    :class:`Slot`. Should be overriden as needed by child class.'''
    int_valued = BooleanProperty(True)
    '''indicates whether the values should be displayed as integers.'''
    scroll_effect_cls = ObjectProperty(RouletteScrollEffect)
    # has to be negative so that ``ScrollEffect.trigger_velocity_update``
    # is always called
    drag_threshold = NumericProperty(-1)
    '''this is passed to the ``drag_threshold`` of :attr:`scroll_effect_cls`.
    It is by default set to -1 to turn off the drag threshold.
    '''
    center_duration = NumericProperty(.3)
    '''duration for the animation of :meth:`center_on`.'''
    density = NumericProperty(4.2)
    '''determines how many slots are shown at a time.'''

    def get_rolling_value(self):
        # value of whatever slot currently sits at the middle of the line
        return self.tick.slot_value(self.tick.localize(self.index_mid))

    def set_rolling_value(self, val):
        self.index_mid = self.tick.globalize(val)

    rolling_value = AliasProperty(get_rolling_value,
                                  set_rolling_value,
                                  bind=['index_mid'])
    '''the val indicated by whatever slot is in the middle of the roulette.
    If the roulette is still, then :attr:`rolling_value` is equal to
    :attr:`selected_value`. Otherwise, they shouldn't be equal.
    .. note::
        This property is not stable under resizing, since often that will
        change the slot in the middle.'''

    def __init__(self, **kw):
        # NOTE(review): a plain Slot is installed before super().__init__ so
        # that handlers fired during construction find a tick; it is then
        # replaced with a tick_cls() instance — confirm the double
        # construction is intentional.
        self.tick = Slot()
        self._trigger_set_selection = \
            Clock.create_trigger(self.set_selected_value)
        super(Roulette, self).__init__(**kw)
        self.scale = dp(10)
        self.tick = self.tick_cls()
        self._trigger_calibrate()

    def on_tick_cls(self, *args):
        # swap in a fresh tick whenever the tick class changes
        self.tick = self.tick_cls()

    def on_tick(self, *args):
        '''push the roulette's display settings down to the single tick.'''
        tick = self.tick
        if tick:
            tick.font_size = self.font_size
            tick.int_valued = self.int_valued
            tick.format_str = self.format_str
            if self.strftime: tick.strftime = self.strftime
            if self.invert:
                # negate the current selection so the wheel rolls in the
                # opposite direction for inverted roulettes
                if self.selected_value:
                    self.selected_value *= -1
                tick.invert = self.invert
            if len(self.rollinglist): tick.rollinglist = self.rollinglist

    def on_size(self, *args):
        # scale so exactly ``density`` slots fit along the line
        self.scale = self.line_length / self.density
        self.recenter()

    def on_int_valued(self, *args):
        if self.tick:
            self.tick.int_valued = self.int_valued

    def on_format_str(self, *args):
        if self.tick:
            self.tick.format_str = self.format_str

    def get_anchor(self):
        '''returns a legal stopping value for the :class:`RouletteScrollEffect`.
        Should be overriden if necessary.'''
        return 0

    def _update_effect_constants(self, *args):
        # parent returns a falsy value when it declines to update; bail then
        if not super(Roulette, self)._update_effect_constants(*args):
            return
        effect = self.scroll_effect
        scale = self.scale
        # keep pull-back velocity constant in screen terms regardless of scale
        effect.pull_back_velocity = sp(50) / scale

    def calibrate_scroll_effect(self, *args, **kw):
        '''configure the scroll effect so it can only stop on legal slots.'''
        if not super(Roulette, self).calibrate_scroll_effect(*args, **kw):
            return
        anchor = self.get_anchor()
        effect = self.scroll_effect
        # one stopping interval per slot
        effect.interval = 1. / self.tick.scale_factor
        effect.anchor = anchor
        # update the selection once the roulette coasts to a stop
        effect.on_coasted_to_stop = self._trigger_set_selection

    def set_selected_value(self, *args):
        '''set :attr:`selected_value` to the currently centered slot.'''
        idx = self.round_(self.rolling_value)
        if self.invert and len(self.rollinglist):
            # inverted list roulettes carry negated indices; undo the sign
            # before using idx for the lookup
            idx *= -1
        self.selected_value = get_str_date(idx, self.strftime) if self.strftime else \
            self.rollinglist[idx] if len(self.rollinglist) else idx

    def round_(self, val):
        '''round an arbitrary rolling value to a legal selection value.
        Should be overriden if necessary.'''
        if self.int_valued:
            return int(round(val))
        return round(val)

    def recenter(self, *args):
        '''re-center on the current selection (e.g. after a resize).'''
        if self.selected_value is not None:
            self.center_on(self.selected_value)
        self._trigger_calibrate()

    def index_of(self, val):
        '''returns the index that should be equivalent to a selection value
        ``val``. Should be overriden if necessary.'''
        return val

    def center_on(self, val, animate=True):
        '''scroll (or jump, if ``animate`` is False) so that ``val`` is at
        the center of the line. Does NOT change :attr:`selected_value`.'''
        Animation.stop_all(self)
        center_index = self.index_of(val)
        half_length = self.line_length / 2. / self.scale
        index_0 = center_index - half_length
        index_1 = center_index + half_length
        if animate:
            anim = Animation(index_0=index_0, index_1=index_1,
                             duration=self.center_duration)
            # NOTE(review): this assigns over the instance's default
            # handler instead of bind()-ing to the on_complete event —
            # confirm it fires reliably on the targeted kivy version.
            anim.on_complete = lambda *args: self._centered()
            anim.start(self)
        else:
            self.index_0 = index_0
            self.index_1 = index_1
            self._centered()

    def on_centered(self, *args):
        '''event that fires when the operation :meth:`center_on` completes.
        (and by extension, when :meth:`center` or :meth:`select_and_center`
        completes). By default it doesn't do anything.'''
        pass

    def _centered(self, *args):
        # recalibrate the scroll effect, then announce completion
        self._trigger_calibrate()
        self.dispatch('on_centered')

    def center(self, animate=True):
        '''center on the current :attr:`selected_value`.'''
        self.center_on(self.selected_value, animate)

    def select_and_center(self, val, *args, **kw):
        '''set :attr:`selected_value` to ``val`` and center on it. If
        :attr:`selected_value` is already ``val``, return False; else return
        True.'''
        if self.selected_value == val:
            return False
        self.selected_value = val
        self.center(*args, **kw)
        return True

    def is_rolling(self):
        '''return True while the roulette is still moving.'''
        return self.scroll_effect.velocity != 0
class CyclicRoulette(Roulette):
    '''roulette for displaying cyclic values.'''

    tick_cls = ObjectProperty(CyclicSlot)
    cycle = NumericProperty(10)
    '''the cycle of values displayed.'''
    zero_indexed = BooleanProperty(False)
    '''whether the values displayed will start from 0 or 1.'''

    def __init__(self, **kw):
        super(CyclicRoulette, self).__init__(**kw)
        # start out centered on the first legal value (0 or 1)
        self.selected_value = self.tick.first_value
        self.center()

    def on_tick(self, *args):
        tick = self.tick
        if tick:
            # a non-empty rollinglist overrides cycle/zero_indexed: the
            # cycle becomes the list length and indexing is zero-based
            tick.cycle = len(self.rollinglist) if len(self.rollinglist) else self.cycle
            tick.zero_indexed = True if len(self.rollinglist) else self.zero_indexed
        super(CyclicRoulette, self).on_tick(*args)

    def on_cycle(self, *args):
        # keep the tick's cycle in sync (a set rollinglist still wins)
        if self.tick:
            self.tick.cycle = len(self.rollinglist) if len(self.rollinglist) else self.cycle

    def on_zero_indexed(self, *args):
        if self.tick:
            self.tick.zero_indexed = True if len(self.rollinglist) else self.zero_indexed

    def index_of(self, val):
        '''return the index closest to the current middle showing ``val``.'''
        tick = self.tick
        if not tick:
            return val
        return tick.index_of(val, tick.localize(self.index_mid))
class TimeFormatCyclicRoulette(CyclicRoulette):
    '''formatted roulette for displaying time.

    Convenience subclass that defaults to ``zero_indexed=True`` and
    two-digit zero-padded formatting (``format_str='{:02d}'``), suitable
    for hour/minute/second columns.
    '''
    zero_indexed = BooleanProperty(True)
    format_str = StringProperty('{:02d}')
if __name__ == '__main__':
    # demo: a primitive datetime selector built out of several roulettes
    from kivy.base import runTouchApp
    from kivy.uix.boxlayout import BoxLayout
    from kivy.uix.label import Label
    b = BoxLayout()
    # rolling date display: index 0 is today; invert flips scroll direction
    b.add_widget(Roulette(width='120sp', density=2.8, selected_value=0, strftime="%a %d %b", invert=True))
    # cyclic roulette over an arbitrary list of strings
    b.add_widget(CyclicRoulette(width='75sp', density=2.8, rollinglist=['cat','dog','frog','mouse','rabbit']))
    b.add_widget(Roulette(density=2.8, selected_value=2013))  # year
    b.add_widget(CyclicRoulette(cycle=12, density=2.8, zero_indexed=False))  # month
    b.add_widget(CyclicRoulette(cycle=30, density=2.8, zero_indexed=False))  # day
    b.add_widget(TimeFormatCyclicRoulette(cycle=24, format_str="{:02d}"))  # hour
    b.add_widget(TimeFormatCyclicRoulette(cycle=60))  # minute
    selected_value = Label()
    rolling_value = Label()
    # mirror every roulette's values into the two labels; all children
    # deliberately share the same pair of labels
    for c in b.children:
        c.bind(selected_value=lambda _, val:
               selected_value.setter('text')(_,
                                             'selected_value:\n' + str(val)),
               rolling_value=lambda _, val:
               rolling_value.setter('text')(_,
                                            'rolling_value:\n' + str(val)))
    b.add_widget(selected_value)
    b.add_widget(rolling_value)
    runTouchApp(b)
| |
"""Support for climate devices through the SmartThings cloud API."""
import asyncio
import logging
from typing import Iterable, Optional, Sequence
from pysmartthings import Attribute, Capability
from homeassistant.components.climate import DOMAIN as CLIMATE_DOMAIN, ClimateDevice
from homeassistant.components.climate.const import (
ATTR_HVAC_MODE,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_FAN,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from . import SmartThingsEntity
from .const import DATA_BROKERS, DOMAIN
ATTR_OPERATION_STATE = "operation_state"

# SmartThings thermostat mode -> HA hvac mode.  Several SmartThings modes
# collapse onto one HA mode ("eco"/"rush hour" -> AUTO, "emergency heat"
# -> HEAT), so STATE_TO_MODE below is a separate, smaller table rather
# than an inversion of this one.
MODE_TO_STATE = {
    "auto": HVAC_MODE_HEAT_COOL,
    "cool": HVAC_MODE_COOL,
    "eco": HVAC_MODE_AUTO,
    "rush hour": HVAC_MODE_AUTO,
    "emergency heat": HVAC_MODE_HEAT,
    "heat": HVAC_MODE_HEAT,
    "off": HVAC_MODE_OFF,
}
# HA hvac mode -> SmartThings thermostat mode (only commandable modes).
STATE_TO_MODE = {
    HVAC_MODE_HEAT_COOL: "auto",
    HVAC_MODE_COOL: "cool",
    HVAC_MODE_HEAT: "heat",
    HVAC_MODE_OFF: "off",
}
# SmartThings thermostat operating state -> HA current hvac action.
OPERATING_STATE_TO_ACTION = {
    "cooling": CURRENT_HVAC_COOL,
    "fan only": CURRENT_HVAC_FAN,
    "heating": CURRENT_HVAC_HEAT,
    "idle": CURRENT_HVAC_IDLE,
    "pending cool": CURRENT_HVAC_COOL,
    "pending heat": CURRENT_HVAC_HEAT,
    "vent economizer": CURRENT_HVAC_FAN,
}
# SmartThings air-conditioner mode -> HA hvac mode; the "*Clean" variants
# map onto their base modes.
AC_MODE_TO_STATE = {
    "auto": HVAC_MODE_HEAT_COOL,
    "cool": HVAC_MODE_COOL,
    "dry": HVAC_MODE_DRY,
    "coolClean": HVAC_MODE_COOL,
    "dryClean": HVAC_MODE_DRY,
    "heat": HVAC_MODE_HEAT,
    "heatClean": HVAC_MODE_HEAT,
    "fanOnly": HVAC_MODE_FAN_ONLY,
}
# HA hvac mode -> SmartThings A/C mode to command.
STATE_TO_AC_MODE = {
    HVAC_MODE_HEAT_COOL: "auto",
    HVAC_MODE_COOL: "cool",
    HVAC_MODE_DRY: "dry",
    HVAC_MODE_HEAT: "heat",
    HVAC_MODE_FAN_ONLY: "fanOnly",
}
# SmartThings temperature unit letter -> HA unit constant.
UNIT_MAP = {"C": TEMP_CELSIUS, "F": TEMP_FAHRENHEIT}
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Add climate entities for a config entry."""
    # A device exposing this full set of capabilities is treated as an
    # air conditioner; anything else assigned to climate is a thermostat.
    ac_capabilities = [
        Capability.air_conditioner_mode,
        Capability.air_conditioner_fan_mode,
        Capability.switch,
        Capability.temperature_measurement,
        Capability.thermostat_cooling_setpoint,
    ]
    broker = hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id]
    entities = []
    for device in broker.devices.values():
        if not broker.any_assigned(device.device_id, CLIMATE_DOMAIN):
            continue
        is_ac = all(c in device.capabilities for c in ac_capabilities)
        entity_cls = SmartThingsAirConditioner if is_ac else SmartThingsThermostat
        entities.append(entity_cls(device))
    async_add_entities(entities, True)
def get_capabilities(capabilities: Sequence[str]) -> Optional[Sequence[str]]:
    """Return all capabilities supported if minimum required are present."""
    supported = [
        Capability.air_conditioner_mode,
        Capability.demand_response_load_control,
        Capability.air_conditioner_fan_mode,
        Capability.power_consumption_report,
        Capability.relative_humidity_measurement,
        Capability.switch,
        Capability.temperature_measurement,
        Capability.thermostat,
        Capability.thermostat_cooling_setpoint,
        Capability.thermostat_fan_mode,
        Capability.thermostat_heating_setpoint,
        Capability.thermostat_mode,
        Capability.thermostat_operating_state,
    ]

    def has_all(required):
        # True when every capability in ``required`` is present
        return all(capability in capabilities for capability in required)

    # The legacy/deprecated all-in-one capability is sufficient on its own.
    if Capability.thermostat in capabilities:
        return supported
    # Otherwise the device must expose the full thermostat set...
    thermostat_capabilities = (
        Capability.temperature_measurement,
        Capability.thermostat_cooling_setpoint,
        Capability.thermostat_heating_setpoint,
        Capability.thermostat_mode,
    )
    # ...or the full air-conditioner set.
    ac_capabilities = (
        Capability.air_conditioner_mode,
        Capability.air_conditioner_fan_mode,
        Capability.switch,
        Capability.temperature_measurement,
        Capability.thermostat_cooling_setpoint,
    )
    if has_all(thermostat_capabilities) or has_all(ac_capabilities):
        return supported
    return None
class SmartThingsThermostat(SmartThingsEntity, ClimateDevice):
    """Define a SmartThings climate entity for thermostat devices."""

    def __init__(self, device):
        """Init the class."""
        super().__init__(device)
        self._supported_features = self._determine_features()
        # populated by async_update; None until the first refresh
        self._hvac_mode = None
        self._hvac_modes = None

    def _determine_features(self):
        """Compute the supported-features bitmask once at init time."""
        flags = SUPPORT_TARGET_TEMPERATURE | SUPPORT_TARGET_TEMPERATURE_RANGE
        # fan control is only advertised when the device has a fan-mode
        # capability (or the legacy all-in-one ``thermostat`` capability)
        if self._device.get_capability(
            Capability.thermostat_fan_mode, Capability.thermostat
        ):
            flags |= SUPPORT_FAN_MODE
        return flags

    async def async_set_fan_mode(self, fan_mode):
        """Set new target fan mode."""
        await self._device.set_thermostat_fan_mode(fan_mode, set_status=True)
        # State is set optimistically in the command above, therefore update
        # the entity state ahead of receiving the confirming push updates
        self.async_schedule_update_ha_state(True)

    async def async_set_hvac_mode(self, hvac_mode):
        """Set new target operation mode."""
        mode = STATE_TO_MODE[hvac_mode]
        await self._device.set_thermostat_mode(mode, set_status=True)
        # State is set optimistically in the command above, therefore update
        # the entity state ahead of receiving the confirming push updates
        self.async_schedule_update_ha_state(True)

    async def async_set_temperature(self, **kwargs):
        """Set new operation mode and target temperatures."""
        # Operation state
        operation_state = kwargs.get(ATTR_HVAC_MODE)
        if operation_state:
            mode = STATE_TO_MODE[operation_state]
            await self._device.set_thermostat_mode(mode, set_status=True)
            # refresh immediately so the setpoint branch below sees the
            # newly commanded mode
            await self.async_update()
        # Heat/cool setpoint
        heating_setpoint = None
        cooling_setpoint = None
        if self.hvac_mode == HVAC_MODE_HEAT:
            heating_setpoint = kwargs.get(ATTR_TEMPERATURE)
        elif self.hvac_mode == HVAC_MODE_COOL:
            cooling_setpoint = kwargs.get(ATTR_TEMPERATURE)
        else:
            # range (heat/cool) mode: low/high bounds arrive separately
            heating_setpoint = kwargs.get(ATTR_TARGET_TEMP_LOW)
            cooling_setpoint = kwargs.get(ATTR_TARGET_TEMP_HIGH)
        tasks = []
        if heating_setpoint is not None:
            tasks.append(
                self._device.set_heating_setpoint(
                    round(heating_setpoint, 3), set_status=True
                )
            )
        if cooling_setpoint is not None:
            tasks.append(
                self._device.set_cooling_setpoint(
                    round(cooling_setpoint, 3), set_status=True
                )
            )
        await asyncio.gather(*tasks)
        # State is set optimistically in the commands above, therefore update
        # the entity state ahead of receiving the confirming push updates
        self.async_schedule_update_ha_state(True)

    async def async_update(self):
        """Update the attributes of the climate device."""
        thermostat_mode = self._device.status.thermostat_mode
        self._hvac_mode = MODE_TO_STATE.get(thermostat_mode)
        if self._hvac_mode is None:
            _LOGGER.debug(
                "Device %s (%s) returned an invalid hvac mode: %s",
                self._device.label,
                self._device.device_id,
                thermostat_mode,
            )
        # rebuild the list of selectable modes, skipping (and logging)
        # any values we cannot map to an HA mode
        modes = set()
        supported_modes = self._device.status.supported_thermostat_modes
        if isinstance(supported_modes, Iterable):
            for mode in supported_modes:
                state = MODE_TO_STATE.get(mode)
                if state is not None:
                    modes.add(state)
                else:
                    _LOGGER.debug(
                        "Device %s (%s) returned an invalid supported thermostat mode: %s",
                        self._device.label,
                        self._device.device_id,
                        mode,
                    )
        else:
            _LOGGER.debug(
                "Device %s (%s) returned invalid supported thermostat modes: %s",
                self._device.label,
                self._device.device_id,
                supported_modes,
            )
        self._hvac_modes = list(modes)

    @property
    def current_humidity(self):
        """Return the current humidity."""
        return self._device.status.humidity

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._device.status.temperature

    @property
    def fan_mode(self):
        """Return the fan setting."""
        return self._device.status.thermostat_fan_mode

    @property
    def fan_modes(self):
        """Return the list of available fan modes."""
        return self._device.status.supported_thermostat_fan_modes

    @property
    def hvac_action(self) -> Optional[str]:
        """Return the current running hvac operation if supported."""
        return OPERATING_STATE_TO_ACTION.get(
            self._device.status.thermostat_operating_state
        )

    @property
    def hvac_mode(self):
        """Return current operation ie. heat, cool, idle."""
        return self._hvac_mode

    @property
    def hvac_modes(self):
        """Return the list of available operation modes."""
        return self._hvac_modes

    @property
    def supported_features(self):
        """Return the supported features."""
        return self._supported_features

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        # single setpoint only applies in pure heat or cool mode
        if self.hvac_mode == HVAC_MODE_COOL:
            return self._device.status.cooling_setpoint
        if self.hvac_mode == HVAC_MODE_HEAT:
            return self._device.status.heating_setpoint
        return None

    @property
    def target_temperature_high(self):
        """Return the highbound target temperature we try to reach."""
        # the high/low pair only applies in heat/cool (range) mode
        if self.hvac_mode == HVAC_MODE_HEAT_COOL:
            return self._device.status.cooling_setpoint
        return None

    @property
    def target_temperature_low(self):
        """Return the lowbound target temperature we try to reach."""
        if self.hvac_mode == HVAC_MODE_HEAT_COOL:
            return self._device.status.heating_setpoint
        return None

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        # map the device-reported unit letter ("C"/"F") to the HA constant
        return UNIT_MAP.get(self._device.status.attributes[Attribute.temperature].unit)
class SmartThingsAirConditioner(SmartThingsEntity, ClimateDevice):
    """Define a SmartThings Air Conditioner."""

    def __init__(self, device):
        """Init the class."""
        super().__init__(device)
        # populated by async_update; None until the first refresh
        self._hvac_modes = None

    async def async_set_fan_mode(self, fan_mode):
        """Set new target fan mode."""
        await self._device.set_fan_mode(fan_mode, set_status=True)
        # State is set optimistically in the command above, therefore update
        # the entity state ahead of receiving the confirming push updates
        self.async_schedule_update_ha_state()

    async def async_set_hvac_mode(self, hvac_mode):
        """Set new target operation mode."""
        # "off" is modeled as the device's switch, not an A/C mode
        if hvac_mode == HVAC_MODE_OFF:
            await self.async_turn_off()
            return
        tasks = []
        # Turn on the device if it's off before setting mode.
        if not self._device.status.switch:
            tasks.append(self._device.switch_on(set_status=True))
        tasks.append(
            self._device.set_air_conditioner_mode(
                STATE_TO_AC_MODE[hvac_mode], set_status=True
            )
        )
        await asyncio.gather(*tasks)
        # State is set optimistically in the command above, therefore update
        # the entity state ahead of receiving the confirming push updates
        self.async_schedule_update_ha_state()

    async def async_set_temperature(self, **kwargs):
        """Set new target temperature."""
        tasks = []
        # operation mode
        operation_mode = kwargs.get(ATTR_HVAC_MODE)
        if operation_mode:
            if operation_mode == HVAC_MODE_OFF:
                tasks.append(self._device.switch_off(set_status=True))
            else:
                if not self._device.status.switch:
                    tasks.append(self._device.switch_on(set_status=True))
                tasks.append(self.async_set_hvac_mode(operation_mode))
        # temperature
        # NOTE(review): assumes ATTR_TEMPERATURE is always present when this
        # service is called — confirm against the climate service schema.
        tasks.append(
            self._device.set_cooling_setpoint(kwargs[ATTR_TEMPERATURE], set_status=True)
        )
        await asyncio.gather(*tasks)
        # State is set optimistically in the command above, therefore update
        # the entity state ahead of receiving the confirming push updates
        self.async_schedule_update_ha_state()

    async def async_turn_on(self):
        """Turn device on."""
        await self._device.switch_on(set_status=True)
        # State is set optimistically in the command above, therefore update
        # the entity state ahead of receiving the confirming push updates
        self.async_schedule_update_ha_state()

    async def async_turn_off(self):
        """Turn device off."""
        await self._device.switch_off(set_status=True)
        # State is set optimistically in the command above, therefore update
        # the entity state ahead of receiving the confirming push updates
        self.async_schedule_update_ha_state()

    async def async_update(self):
        """Update the calculated fields of the AC."""
        # OFF is always available since it maps to the device switch
        modes = {HVAC_MODE_OFF}
        for mode in self._device.status.supported_ac_modes:
            state = AC_MODE_TO_STATE.get(mode)
            if state is not None:
                modes.add(state)
            else:
                _LOGGER.debug(
                    "Device %s (%s) returned an invalid supported AC mode: %s",
                    self._device.label,
                    self._device.device_id,
                    mode,
                )
        self._hvac_modes = modes

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._device.status.temperature

    @property
    def device_state_attributes(self):
        """
        Return device specific state attributes.
        Include attributes from the Demand Response Load Control (drlc)
        and Power Consumption capabilities.
        """
        attributes = [
            "drlc_status_duration",
            "drlc_status_level",
            "drlc_status_start",
            "drlc_status_override",
            "power_consumption_start",
            "power_consumption_power",
            "power_consumption_energy",
            "power_consumption_end",
        ]
        state_attributes = {}
        # only expose attributes the device actually reports
        for attribute in attributes:
            value = getattr(self._device.status, attribute)
            if value is not None:
                state_attributes[attribute] = value
        return state_attributes

    @property
    def fan_mode(self):
        """Return the fan setting."""
        return self._device.status.fan_mode

    @property
    def fan_modes(self):
        """Return the list of available fan modes."""
        return self._device.status.supported_ac_fan_modes

    @property
    def hvac_mode(self):
        """Return current operation ie. heat, cool, idle."""
        # a switched-off device is reported as OFF regardless of its mode
        if not self._device.status.switch:
            return HVAC_MODE_OFF
        return AC_MODE_TO_STATE.get(self._device.status.air_conditioner_mode)

    @property
    def hvac_modes(self):
        """Return the list of available operation modes."""
        return self._hvac_modes

    @property
    def supported_features(self):
        """Return the supported features."""
        return SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._device.status.cooling_setpoint

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        # map the device-reported unit letter ("C"/"F") to the HA constant
        return UNIT_MAP.get(self._device.status.attributes[Attribute.temperature].unit)
| |
import logging
import engineio
import six
from . import base_manager
from . import packet
class Server(object):
    """A Socket.IO server.
    This class implements a fully compliant Socket.IO web server with support
    for websocket and long-polling transports.
    :param engineio_options: A ``dict`` with options for the Engine.IO server.
                             The values are passed directly to the
                             ``engineio.Server`` constructor.
    :param client_manager_class: The class that will manage the client list.
                                 The default value is appropriate for most
                                 cases.
    :param logger: To enable logging set to ``True`` or pass a logger object to
                   use. To disable logging set to ``False``.
    :param binary: ``True`` to support binary payloads, ``False`` to treat all
                   payloads as text. On Python 2, if this is set to ``True``,
                   ``unicode`` values are treated as text, and ``str`` and
                   ``bytes`` values are treated as binary. This option has no
                   effect on Python 3, where text and binary payloads are
                   always automatically discovered.
    :param json: An alternative json module to use for encoding and decoding
                 packets. Custom json modules must have ``dumps`` and ``loads``
                 functions that are compatible with the standard library
                 versions.
    :param kwargs: Connection parameters for the underlying Engine.IO server.
    The Engine.IO configuration supports the following settings:
    :param async_mode: The library used for asynchronous operations. Valid
                       options are "threading", "eventlet" and "gevent". If
                       this argument is not given, "eventlet" is tried first,
                       then "gevent", and finally "threading". The websocket
                       transport is only supported in "eventlet" mode.
    :param ping_timeout: The time in seconds that the client waits for the
                         server to respond before disconnecting.
    :param ping_interval: The interval in seconds at which the client pings
                          the server.
    :param max_http_buffer_size: The maximum size of a message when using the
                                 polling transport.
    :param allow_upgrades: Whether to allow transport upgrades or not.
    :param http_compression: Whether to compress packages when using the
                             polling transport.
    :param compression_threshold: Only compress messages when their byte size
                                  is greater than this value.
    :param cookie: Name of the HTTP cookie that contains the client session
                   id. If set to ``None``, a cookie is not sent to the client.
    :param cors_allowed_origins: List of origins that are allowed to connect
                                 to this server. All origins are allowed by
                                 default.
    :param cors_credentials: Whether credentials (cookies, authentication) are
                             allowed in requests to this server.
    :param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
                            a logger object to use. To disable logging set to
                            ``False``.
    """
    def __init__(self, client_manager_class=None, logger=False, binary=False,
                 json=None, **kwargs):
        if client_manager_class is None:
            client_manager_class = base_manager.BaseManager
        self.manager = client_manager_class(self)
        # Whatever keyword arguments are left over are forwarded verbatim to
        # the Engine.IO server constructor.
        engineio_options = kwargs
        # 'engineio_logger' is translated to Engine.IO's own 'logger' option.
        engineio_logger = engineio_options.pop('engineio_logger', None)
        if engineio_logger is not None:
            engineio_options['logger'] = engineio_logger
        if json is not None:
            # Share the custom json module with the packet encoder/decoder.
            packet.Packet.json = json
            engineio_options['json'] = json
        self.eio = engineio.Server(**engineio_options)
        self.eio.on('connect', self._handle_eio_connect)
        self.eio.on('message', self._handle_eio_message)
        self.eio.on('disconnect', self._handle_eio_disconnect)
        self.binary = binary
        # Per-sid WSGI environments, saved at connect time.
        self.environ = {}
        # Event handlers, keyed by namespace then event name.
        self.handlers = {}
        # State for reassembling a binary packet whose attachments arrive in
        # separate Engine.IO messages.
        # NOTE(review): this reassembly state is stored per server, not per
        # sid — interleaved binary packets from different clients could mix
        # attachments. Confirm whether concurrent binary senders are possible
        # in the supported async modes.
        self._binary_packet = None
        self._attachment_count = 0
        self._attachments = []
        if not isinstance(logger, bool):
            self.logger = logger
        else:
            self.logger = logging.getLogger('socketio')
            # Only configure the logger if the application has not already
            # set up logging of its own.
            if not logging.root.handlers and \
                    self.logger.level == logging.NOTSET:
                if logger:
                    self.logger.setLevel(logging.INFO)
                else:
                    self.logger.setLevel(logging.ERROR)
                self.logger.addHandler(logging.StreamHandler())
    def on(self, event, handler=None, namespace=None):
        """Register an event handler.
        :param event: The event name. It can be any string. The event names
                      ``'connect'``, ``'message'`` and ``'disconnect'`` are
                      reserved and should not be used.
        :param handler: The function that should be invoked to handle the
                        event. When this parameter is not given, the method
                        acts as a decorator for the handler function.
        :param namespace: The Socket.IO namespace for the event. If this
                          argument is omitted the handler is associated with
                          the default namespace.
        Example usage::
            # as a decorator:
            @socket_io.on('connect', namespace='/chat')
            def connect_handler(sid, environ):
                print('Connection request')
                if environ['REMOTE_ADDR'] in blacklisted:
                    return False  # reject
            # as a method:
            def message_handler(sid, msg):
                print('Received message: ', msg)
                eio.send(sid, 'response')
            socket_io.on('message', message_handler, namespace='/chat')
        The handler function receives the ``sid`` (session ID) for the
        client as first argument. The ``'connect'`` event handler receives the
        WSGI environment as a second argument, and can return ``False`` to
        reject the connection. The ``'message'`` handler and handlers for
        custom event names receive the message payload as a second argument.
        Any values returned from a message handler will be passed to the
        client's acknowledgement callback function if it exists. The
        ``'disconnect'`` handler does not take a second argument.
        """
        namespace = namespace or '/'
        def set_handler(handler):
            if namespace not in self.handlers:
                self.handlers[namespace] = {}
            self.handlers[namespace][event] = handler
            return handler
        if handler is None:
            # Decorator usage: return the registering closure.
            return set_handler
        set_handler(handler)
    def emit(self, event, data=None, room=None, skip_sid=None, namespace=None,
             callback=None):
        """Emit a custom event to one or more connected clients.
        :param event: The event name. It can be any string. The event names
                      ``'connect'``, ``'message'`` and ``'disconnect'`` are
                      reserved and should not be used.
        :param data: The data to send to the client or clients. Data can be of
                     type ``str``, ``bytes``, ``list`` or ``dict``. If a
                     ``list`` or ``dict``, the data will be serialized as JSON.
        :param room: The recipient of the message. This can be set to the
                     session ID of a client to address that client's room, or
                     to any custom room created by the application. If this
                     argument is omitted the event is broadcasted to all
                     connected clients.
        :param skip_sid: The session ID of a client to skip when broadcasting
                         to a room or to all clients. This can be used to
                         prevent a message from being sent to the sender.
        :param namespace: The Socket.IO namespace for the event. If this
                          argument is omitted the event is emitted to the
                          default namespace.
        :param callback: If given, this function will be called to acknowledge
                         that the client has received the message. The
                         arguments that will be passed to the function are
                         those provided by the client. Callback functions can
                         only be used when addressing an individual client.
        """
        namespace = namespace or '/'
        self.logger.info('emitting event "%s" to %s [%s]', event,
                         room or 'all', namespace)
        self.manager.emit(event, data, namespace, room, skip_sid, callback)
    def send(self, data, room=None, skip_sid=None, namespace=None,
             callback=None):
        """Send a message to one or more connected clients.
        This function emits an event with the name ``'message'``. Use
        :func:`emit` to issue custom event names.
        :param data: The data to send to the client or clients. Data can be of
                     type ``str``, ``bytes``, ``list`` or ``dict``. If a
                     ``list`` or ``dict``, the data will be serialized as JSON.
        :param room: The recipient of the message. This can be set to the
                     session ID of a client to address that client's room, or
                     to any custom room created by the application. If this
                     argument is omitted the event is broadcasted to all
                     connected clients.
        :param skip_sid: The session ID of a client to skip when broadcasting
                         to a room or to all clients. This can be used to
                         prevent a message from being sent to the sender.
        :param namespace: The Socket.IO namespace for the event. If this
                          argument is omitted the event is emitted to the
                          default namespace.
        :param callback: If given, this function will be called to acknowledge
                         that the client has received the message. The
                         arguments that will be passed to the function are
                         those provided by the client. Callback functions can
                         only be used when addressing an individual client.
        """
        self.emit('message', data, room, skip_sid, namespace, callback)
    def enter_room(self, sid, room, namespace=None):
        """Enter a room.
        This function adds the client to a room. The :func:`emit` and
        :func:`send` functions can optionally broadcast events to all the
        clients in a room.
        :param sid: Session ID of the client.
        :param room: Room name. If the room does not exist it is created.
        :param namespace: The Socket.IO namespace for the event. If this
                          argument is omitted the default namespace is used.
        """
        namespace = namespace or '/'
        self.logger.info('%s is entering room %s [%s]', sid, room, namespace)
        self.manager.enter_room(sid, namespace, room)
    def leave_room(self, sid, room, namespace=None):
        """Leave a room.
        This function removes the client from a room.
        :param sid: Session ID of the client.
        :param room: Room name.
        :param namespace: The Socket.IO namespace for the event. If this
                          argument is omitted the default namespace is used.
        """
        namespace = namespace or '/'
        self.logger.info('%s is leaving room %s [%s]', sid, room, namespace)
        self.manager.leave_room(sid, namespace, room)
    def close_room(self, room, namespace=None):
        """Close a room.
        This function removes all the clients from the given room.
        :param room: Room name.
        :param namespace: The Socket.IO namespace for the event. If this
                          argument is omitted the default namespace is used.
        """
        namespace = namespace or '/'
        self.logger.info('room %s is closing [%s]', room, namespace)
        self.manager.close_room(namespace, room)
    def rooms(self, sid, namespace=None):
        """Return the rooms a client is in.
        :param sid: Session ID of the client.
        :param namespace: The Socket.IO namespace for the event. If this
                          argument is omitted the default namespace is used.
        """
        namespace = namespace or '/'
        return self.manager.get_rooms(sid, namespace)
    def disconnect(self, sid, namespace=None):
        """Disconnect a client.
        :param sid: Session ID of the client.
        :param namespace: The Socket.IO namespace to disconnect. If this
                          argument is omitted the default namespace is used.
        """
        namespace = namespace or '/'
        self.logger.info('Disconnecting %s [%s]', sid, namespace)
        # Notify the client, fire the application handler, then drop the
        # client from the manager — in that order.
        self._send_packet(sid, packet.Packet(packet.DISCONNECT,
                                             namespace=namespace))
        self._trigger_event('disconnect', namespace, sid)
        self.manager.disconnect(sid, namespace=namespace)
    def transport(self, sid):
        """Return the name of the transport used by the client.
        The two possible values returned by this function are ``'polling'``
        and ``'websocket'``.
        :param sid: The session of the client.
        """
        return self.eio.transport(sid)
    def handle_request(self, environ, start_response):
        """Handle an HTTP request from the client.
        This is the entry point of the Socket.IO application, using the same
        interface as a WSGI application. For the typical usage, this function
        is invoked by the :class:`Middleware` instance, but it can be invoked
        directly when the middleware is not used.
        :param environ: The WSGI environment.
        :param start_response: The WSGI ``start_response`` function.
        This function returns the HTTP response body to deliver to the client
        as a byte sequence.
        """
        return self.eio.handle_request(environ, start_response)
    def _emit_internal(self, sid, event, data, namespace=None, id=None):
        """Send a message to a client."""
        # On Python 2, binary=False forces text payloads unless binary
        # support was requested; binary=None lets the packet auto-detect.
        if six.PY2 and not self.binary:
            binary = False  # pragma: nocover
        else:
            binary = None
        self._send_packet(sid, packet.Packet(packet.EVENT, namespace=namespace,
                                             data=[event, data], id=id,
                                             binary=binary))
    def _send_packet(self, sid, pkt):
        """Send a Socket.IO packet to a client."""
        encoded_packet = pkt.encode()
        if isinstance(encoded_packet, list):
            # A list means a binary packet: the first element is the text
            # header, the remaining elements are binary attachments.
            binary = False
            for ep in encoded_packet:
                self.eio.send(sid, ep, binary=binary)
                binary = True
        else:
            self.eio.send(sid, encoded_packet, binary=False)
    def _handle_connect(self, sid, namespace):
        """Handle a client connection request."""
        namespace = namespace or '/'
        self.manager.connect(sid, namespace)
        # The application's connect handler may veto the connection by
        # returning False; in that case roll back and send an ERROR packet.
        if self._trigger_event('connect', namespace, sid,
                               self.environ[sid]) is False:
            self.manager.disconnect(sid, namespace)
            self._send_packet(sid, packet.Packet(packet.ERROR,
                                                 namespace=namespace))
        else:
            self._send_packet(sid, packet.Packet(packet.CONNECT,
                                                 namespace=namespace))
    def _handle_disconnect(self, sid, namespace):
        """Handle a client disconnect."""
        namespace = namespace or '/'
        # A disconnect on the root namespace disconnects every namespace the
        # client is in; child namespaces are handled first, then '/'.
        if namespace == '/':
            namespace_list = list(self.manager.get_namespaces())
        else:
            namespace_list = [namespace]
        for n in namespace_list:
            if n != '/' and self.manager.is_connected(sid, n):
                self._trigger_event('disconnect', n, sid)
                self.manager.disconnect(sid, n)
        if namespace == '/' and self.manager.is_connected(sid, namespace):
            self._trigger_event('disconnect', '/', sid)
            self.manager.disconnect(sid, '/')
        if sid in self.environ:
            del self.environ[sid]
    def _handle_event(self, sid, namespace, id, data):
        """Handle an incoming client event."""
        namespace = namespace or '/'
        self.logger.info('received event "%s" from %s [%s]', data[0], sid,
                         namespace)
        # data[0] is the event name, the rest are the handler arguments.
        r = self._trigger_event(data[0], namespace, sid, *data[1:])
        if id is not None:
            # send ACK packet with the response returned by the handler
            # A tuple return becomes multiple callback arguments; anything
            # else becomes a single argument.
            if isinstance(r, tuple):
                data = list(r)
            elif isinstance(r, list):
                data = r
            else:
                data = [r]
            if six.PY2 and not self.binary:
                binary = False  # pragma: nocover
            else:
                binary = None
            self._send_packet(sid, packet.Packet(packet.ACK,
                                                 namespace=namespace,
                                                 id=id, data=data,
                                                 binary=binary))
    def _handle_ack(self, sid, namespace, id, data):
        """Handle ACK packets from the client."""
        namespace = namespace or '/'
        self.logger.info('received ack from %s [%s]', sid, namespace)
        self.manager.trigger_callback(sid, namespace, id, data)
    def _trigger_event(self, event, namespace, *args):
        """Invoke an application event handler."""
        # Unhandled events are silently ignored (returns None).
        if namespace in self.handlers and event in self.handlers[namespace]:
            return self.handlers[namespace][event](*args)
    def _handle_eio_connect(self, sid, environ):
        """Handle the Engine.IO connection event."""
        self.environ[sid] = environ
        self._handle_connect(sid, '/')
    def _handle_eio_message(self, sid, data):
        """Dispatch Engine.IO messages."""
        if self._attachment_count > 0:
            # A binary packet is in flight: this message is one of its
            # attachments.
            self._attachments.append(data)
            self._attachment_count -= 1
            if self._attachment_count == 0:
                # All attachments received: rebuild the packet, dispatch it,
                # and reset the reassembly state.
                self._binary_packet.reconstruct_binary(self._attachments)
                if self._binary_packet.packet_type == packet.BINARY_EVENT:
                    self._handle_event(sid, self._binary_packet.namespace,
                                       self._binary_packet.id,
                                       self._binary_packet.data)
                else:
                    self._handle_ack(sid, self._binary_packet.namespace,
                                     self._binary_packet.id,
                                     self._binary_packet.data)
                self._binary_packet = None
                self._attachments = []
        else:
            pkt = packet.Packet(encoded_packet=data)
            if pkt.packet_type == packet.CONNECT:
                self._handle_connect(sid, pkt.namespace)
            elif pkt.packet_type == packet.DISCONNECT:
                self._handle_disconnect(sid, pkt.namespace)
            elif pkt.packet_type == packet.EVENT:
                self._handle_event(sid, pkt.namespace, pkt.id, pkt.data)
            elif pkt.packet_type == packet.ACK:
                self._handle_ack(sid, pkt.namespace, pkt.id, pkt.data)
            elif pkt.packet_type == packet.BINARY_EVENT or \
                    pkt.packet_type == packet.BINARY_ACK:
                # Start collecting attachments; the packet is dispatched once
                # the last attachment arrives (branch above).
                self._binary_packet = pkt
                self._attachments = []
                self._attachment_count = pkt.attachment_count
            elif pkt.packet_type == packet.ERROR:
                # ERROR packets are only ever sent server-to-client.
                raise ValueError('Unexpected ERROR packet.')
            else:
                raise ValueError('Unknown packet type.')
    def _handle_eio_disconnect(self, sid):
        """Handle Engine.IO disconnect event."""
        self._handle_disconnect(sid, '/')
| |
# Copyright 2013 eBay Inc.
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
import webob
from cinder.api.contrib import qos_specs_manage
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_notifier
def stub_qos_specs(id):
    """Build a fake QualityOfServiceSpecs object for the given id."""
    spec_values = {'key%d' % n: 'value%d' % n for n in range(1, 6)}
    return objects.QualityOfServiceSpecs(
        name='qos_specs_' + str(id),
        consumer='back-end',
        id=str(id),
        specs=spec_values,
    )
def stub_qos_associates(id):
    """Return a one-element stub volume-type association list."""
    association = {
        'association_type': 'volume_type',
        'name': 'FakeVolTypeName',
        'id': fake.VOLUME_TYPE_ID,
    }
    return [association]
def return_qos_specs_get_all(context, filters=None, marker=None, limit=None,
                             offset=None, sort_keys=None, sort_dirs=None):
    """Return three stub QoS specs; all filtering arguments are ignored."""
    spec_ids = (fake.QOS_SPEC_ID, fake.QOS_SPEC2_ID, fake.QOS_SPEC3_ID)
    return [stub_qos_specs(spec_id) for spec_id in spec_ids]
def return_qos_specs_get_qos_specs(context, id):
    """Return a stub spec, or raise QoSSpecsNotFound for the sentinel id."""
    if id != fake.WILL_NOT_BE_FOUND_ID:
        return stub_qos_specs(id)
    raise exception.QoSSpecsNotFound(specs_id=id)
def return_qos_specs_delete(context, id, force):
    """Simulate qos_specs.delete(): raise for not-found / in-use sentinels."""
    if id == fake.WILL_NOT_BE_FOUND_ID:
        raise exception.QoSSpecsNotFound(specs_id=id)
    if id == fake.IN_USE_ID:
        raise exception.QoSSpecsInUse(specs_id=id)
def return_qos_specs_delete_keys(context, id, keys):
    """Simulate qos_specs.delete_keys() with sentinel-driven failures."""
    if id == fake.WILL_NOT_BE_FOUND_ID:
        raise exception.QoSSpecsNotFound(specs_id=id)
    if 'foo' in keys:
        raise exception.QoSSpecsKeyNotFound(specs_id=id, specs_key='foo')
def return_qos_specs_update(context, id, specs):
    """Simulate qos_specs.update() with sentinel-driven failures."""
    if id == fake.WILL_NOT_BE_FOUND_ID:
        raise exception.QoSSpecsNotFound(specs_id=id)
    if id == fake.INVALID_ID:
        raise exception.InvalidQoSSpecs(reason=id)
    if id == fake.UPDATE_FAILED_ID:
        raise exception.QoSSpecsUpdateFailed(specs_id=id, qos_specs=specs)
def return_qos_specs_create(context, name, specs):
    """Simulate qos_specs.create().

    Raises the appropriate exception for the "already exists", "action
    failed" and "invalid" sentinel names; otherwise returns a stub
    QualityOfServiceSpecs object.
    """
    if name == 'qos_spec_%s' % fake.ALREADY_EXISTS_ID:
        raise exception.QoSSpecsExists(specs_id=name)
    elif name == 'qos_spec_%s' % fake.ACTION_FAILED_ID:
        # Bug fix: this previously passed ``name=id`` — with no ``id``
        # parameter in scope, that handed the *builtin* ``id`` function to
        # the exception instead of the failing spec's name.
        raise exception.QoSSpecsCreateFailed(name=name, qos_specs=specs)
    elif name == 'qos_spec_%s' % fake.INVALID_ID:
        raise exception.InvalidQoSSpecs(reason=name)
    return objects.QualityOfServiceSpecs(name=name,
                                         specs=specs,
                                         consumer='back-end',
                                         id=fake.QOS_SPEC_ID)
def return_get_qos_associations(context, id):
    """Return stub associations, or raise for the failure sentinel ids."""
    if id == fake.WILL_NOT_BE_FOUND_ID:
        raise exception.QoSSpecsNotFound(specs_id=id)
    if id == fake.RAISE_ID:
        raise exception.CinderException()
    return stub_qos_associates(id)
def return_associate_qos_specs(context, id, type_id):
    """Simulate (dis)associate calls with sentinel-driven failures."""
    if id == fake.WILL_NOT_BE_FOUND_ID:
        raise exception.QoSSpecsNotFound(specs_id=id)
    if id == fake.ACTION_FAILED_ID:
        raise exception.QoSSpecsAssociateFailed(specs_id=id,
                                                type_id=type_id)
    if id == fake.ACTION2_FAILED_ID:
        raise exception.QoSSpecsDisassociateFailed(specs_id=id,
                                                   type_id=type_id)
    if type_id == fake.WILL_NOT_BE_FOUND_ID:
        raise exception.VolumeTypeNotFound(volume_type_id=type_id)
def return_disassociate_all(context, id):
    """Simulate qos_specs.disassociate_all() with sentinel failures."""
    if id == fake.WILL_NOT_BE_FOUND_ID:
        raise exception.QoSSpecsNotFound(specs_id=id)
    if id == fake.ACTION2_FAILED_ID:
        raise exception.QoSSpecsDisassociateFailed(specs_id=id,
                                                   type_id=None)
@ddt.ddt
class QoSSpecManageApiTest(test.TestCase):
def _create_qos_specs(self, name, values=None):
"""Create a transfer object."""
if values:
specs = dict(name=name, qos_specs=values)
else:
specs = {'name': name,
'consumer': 'back-end',
'specs': {
'key1': 'value1',
'key2': 'value2'}}
return db.qos_specs_create(self.ctxt, specs)['id']
def setUp(self):
super(QoSSpecManageApiTest, self).setUp()
self.flags(host='fake')
self.controller = qos_specs_manage.QoSSpecsController()
self.ctxt = context.RequestContext(user_id=fake.USER_ID,
project_id=fake.PROJECT_ID,
is_admin=True)
self.user_ctxt = context.RequestContext(
fake.USER_ID, fake.PROJECT_ID, auth_token=True)
self.qos_id1 = self._create_qos_specs("Qos_test_1")
self.qos_id2 = self._create_qos_specs("Qos_test_2")
self.qos_id3 = self._create_qos_specs("Qos_test_3")
self.qos_id4 = self._create_qos_specs("Qos_test_4")
@mock.patch('cinder.volume.qos_specs.get_all_specs',
side_effect=return_qos_specs_get_all)
def test_index(self, mock_get_all_specs):
req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID)
res = self.controller.index(req)
self.assertEqual(3, len(res['qos_specs']))
names = set()
for item in res['qos_specs']:
self.assertEqual('value1', item['specs']['key1'])
names.add(item['name'])
expected_names = ['qos_specs_%s' % fake.QOS_SPEC_ID,
'qos_specs_%s' % fake.QOS_SPEC2_ID,
'qos_specs_%s' % fake.QOS_SPEC3_ID]
self.assertEqual(set(expected_names), names)
def test_index_with_limit(self):
url = '/v2/%s/qos-specs?limit=2' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url, use_admin_context=True)
res = self.controller.index(req)
self.assertEqual(2, len(res['qos_specs']))
self.assertEqual(self.qos_id4, res['qos_specs'][0]['id'])
self.assertEqual(self.qos_id3, res['qos_specs'][1]['id'])
expect_next_link = ('http://localhost/v2/%s/qos-specs?limit'
'=2&marker=%s') % (
fake.PROJECT_ID, res['qos_specs'][1]['id'])
self.assertEqual(expect_next_link, res['qos_specs_links'][0]['href'])
def test_index_with_offset(self):
url = '/v2/%s/qos-specs?offset=1' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url, use_admin_context=True)
res = self.controller.index(req)
self.assertEqual(3, len(res['qos_specs']))
def test_index_with_offset_out_of_range(self):
url = '/v2/%s/qos-specs?offset=356576877698707' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url, use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index,
req)
def test_index_with_limit_and_offset(self):
url = '/v2/%s/qos-specs?limit=2&offset=1' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url, use_admin_context=True)
res = self.controller.index(req)
self.assertEqual(2, len(res['qos_specs']))
self.assertEqual(self.qos_id3, res['qos_specs'][0]['id'])
self.assertEqual(self.qos_id2, res['qos_specs'][1]['id'])
def test_index_with_marker(self):
url = '/v2/%s/qos-specs?marker=%s' % (fake.PROJECT_ID, self.qos_id4)
req = fakes.HTTPRequest.blank(url, use_admin_context=True)
res = self.controller.index(req)
self.assertEqual(3, len(res['qos_specs']))
def test_index_with_filter(self):
url = '/v2/%s/qos-specs?id=%s' % (fake.PROJECT_ID, self.qos_id4)
req = fakes.HTTPRequest.blank(url, use_admin_context=True)
res = self.controller.index(req)
self.assertEqual(1, len(res['qos_specs']))
self.assertEqual(self.qos_id4, res['qos_specs'][0]['id'])
def test_index_with_sort_keys(self):
url = '/v2/%s/qos-specs?sort=id' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url, use_admin_context=True)
res = self.controller.index(req)
self.assertEqual(4, len(res['qos_specs']))
expect_result = [self.qos_id1, self.qos_id2,
self.qos_id3, self.qos_id4]
expect_result.sort(reverse=True)
self.assertEqual(expect_result[0], res['qos_specs'][0]['id'])
self.assertEqual(expect_result[1], res['qos_specs'][1]['id'])
self.assertEqual(expect_result[2], res['qos_specs'][2]['id'])
self.assertEqual(expect_result[3], res['qos_specs'][3]['id'])
def test_index_with_sort_keys_and_sort_dirs(self):
url = '/v2/%s/qos-specs?sort=id:asc' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url, use_admin_context=True)
res = self.controller.index(req)
self.assertEqual(4, len(res['qos_specs']))
expect_result = [self.qos_id1, self.qos_id2,
self.qos_id3, self.qos_id4]
expect_result.sort()
self.assertEqual(expect_result[0], res['qos_specs'][0]['id'])
self.assertEqual(expect_result[1], res['qos_specs'][1]['id'])
self.assertEqual(expect_result[2], res['qos_specs'][2]['id'])
self.assertEqual(expect_result[3], res['qos_specs'][3]['id'])
@mock.patch('cinder.volume.qos_specs.get_qos_specs',
side_effect=return_qos_specs_get_qos_specs)
@mock.patch('cinder.volume.qos_specs.delete',
side_effect=return_qos_specs_delete)
def test_qos_specs_delete(self, mock_qos_delete, mock_qos_get_specs):
req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' % (
fake.PROJECT_ID, fake.QOS_SPEC_ID))
notifier = fake_notifier.get_fake_notifier()
with mock.patch('cinder.rpc.get_notifier', return_value=notifier):
self.controller.delete(req, fake.QOS_SPEC_ID)
self.assertEqual(1, notifier.get_notification_count())
@mock.patch('cinder.volume.qos_specs.get_qos_specs',
side_effect=return_qos_specs_get_qos_specs)
@mock.patch('cinder.volume.qos_specs.delete',
side_effect=return_qos_specs_delete)
def test_qos_specs_delete_not_found(self, mock_qos_delete,
mock_qos_get_specs):
notifier = fake_notifier.get_fake_notifier()
with mock.patch('cinder.rpc.get_notifier', return_value=notifier):
req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' %
(fake.PROJECT_ID,
fake.WILL_NOT_BE_FOUND_ID))
self.assertRaises(exception.QoSSpecsNotFound,
self.controller.delete, req,
fake.WILL_NOT_BE_FOUND_ID)
self.assertEqual(1, notifier.get_notification_count())
@mock.patch('cinder.volume.qos_specs.get_qos_specs',
side_effect=return_qos_specs_get_qos_specs)
@mock.patch('cinder.volume.qos_specs.delete',
side_effect=return_qos_specs_delete)
def test_qos_specs_delete_inuse(self, mock_qos_delete,
mock_qos_get_specs):
req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' % (
fake.PROJECT_ID, fake.IN_USE_ID))
notifier = fake_notifier.get_fake_notifier()
with mock.patch('cinder.rpc.get_notifier', return_value=notifier):
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, fake.IN_USE_ID)
self.assertEqual(1, notifier.get_notification_count())
@mock.patch('cinder.volume.qos_specs.get_qos_specs',
side_effect=return_qos_specs_get_qos_specs)
@mock.patch('cinder.volume.qos_specs.delete',
side_effect=return_qos_specs_delete)
def test_qos_specs_delete_inuse_force(self, mock_qos_delete,
mock_qos_get_specs):
req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s?force=True' %
(fake.PROJECT_ID, fake.IN_USE_ID))
notifier = fake_notifier.get_fake_notifier()
with mock.patch('cinder.rpc.get_notifier', return_value=notifier):
self.assertRaises(webob.exc.HTTPInternalServerError,
self.controller.delete,
req, fake.IN_USE_ID)
self.assertEqual(1, notifier.get_notification_count())
def test_qos_specs_delete_with_invalid_force(self):
invalid_force = "invalid_bool"
req = fakes.HTTPRequest.blank(
'/v2/%s/qos-specs/%s/delete_keys?force=%s' %
(fake.PROJECT_ID, fake.QOS_SPEC_ID, invalid_force))
self.assertRaises(exception.InvalidParameterValue,
self.controller.delete,
req, fake.QOS_SPEC_ID)
@mock.patch('cinder.volume.qos_specs.delete_keys',
side_effect=return_qos_specs_delete_keys)
def test_qos_specs_delete_keys(self, mock_qos_delete_keys):
body = {"keys": ['bar', 'zoo']}
req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s/delete_keys' %
(fake.PROJECT_ID, fake.IN_USE_ID))
notifier = fake_notifier.get_fake_notifier()
with mock.patch('cinder.rpc.get_notifier', return_value=notifier):
self.controller.delete_keys(req, fake.IN_USE_ID, body)
self.assertEqual(1, notifier.get_notification_count())
@mock.patch('cinder.volume.qos_specs.delete_keys',
side_effect=return_qos_specs_delete_keys)
def test_qos_specs_delete_keys_qos_notfound(self, mock_qos_specs_delete):
body = {"keys": ['bar', 'zoo']}
req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s/delete_keys' %
(fake.PROJECT_ID,
fake.WILL_NOT_BE_FOUND_ID))
notifier = fake_notifier.get_fake_notifier()
with mock.patch('cinder.rpc.get_notifier', return_value=notifier):
self.assertRaises(exception.QoSSpecsNotFound,
self.controller.delete_keys,
req, fake.WILL_NOT_BE_FOUND_ID, body)
self.assertEqual(1, notifier.get_notification_count())
@mock.patch('cinder.volume.qos_specs.delete_keys',
side_effect=return_qos_specs_delete_keys)
def test_qos_specs_delete_keys_badkey(self, mock_qos_specs_delete):
req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s/delete_keys' %
(fake.PROJECT_ID, fake.IN_USE_ID))
body = {"keys": ['foo', 'zoo']}
notifier = fake_notifier.get_fake_notifier()
with mock.patch('cinder.rpc.get_notifier', return_value=notifier):
self.assertRaises(exception.QoSSpecsKeyNotFound,
self.controller.delete_keys,
req, fake.IN_USE_ID, body)
self.assertEqual(1, notifier.get_notification_count())
@mock.patch('cinder.volume.qos_specs.delete_keys',
side_effect=return_qos_specs_delete_keys)
def test_qos_specs_delete_keys_get_notifier(self, mock_qos_delete_keys):
body = {"keys": ['bar', 'zoo']}
req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s/delete_keys' %
(fake.PROJECT_ID, fake.IN_USE_ID))
notifier = fake_notifier.get_fake_notifier()
with mock.patch('cinder.rpc.get_notifier', return_value=notifier,
autospec=True) as mock_get_notifier:
self.controller.delete_keys(req, fake.IN_USE_ID, body)
mock_get_notifier.assert_called_once_with('QoSSpecs')
@mock.patch('cinder.volume.qos_specs.create',
side_effect=return_qos_specs_create)
@mock.patch('cinder.utils.validate_dictionary_string_length')
def test_create(self, mock_validate, mock_qos_spec_create):
body = {"qos_specs": {"name": "qos_specs_%s" % fake.QOS_SPEC_ID,
"key1": "value1"}}
req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID)
notifier = fake_notifier.get_fake_notifier()
with mock.patch('cinder.rpc.get_notifier', return_value=notifier):
res_dict = self.controller.create(req, body)
self.assertEqual(1, notifier.get_notification_count())
self.assertEqual('qos_specs_%s' % fake.QOS_SPEC_ID,
res_dict['qos_specs']['name'])
self.assertTrue(mock_validate.called)
@mock.patch('cinder.volume.qos_specs.create',
side_effect=return_qos_specs_create)
def test_create_invalid_input(self, mock_qos_get_specs):
body = {"qos_specs": {"name": 'qos_spec_%s' % fake.INVALID_ID,
"consumer": "invalid_consumer"}}
req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID)
notifier = fake_notifier.get_fake_notifier()
with mock.patch('cinder.rpc.get_notifier', return_value=notifier):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
self.assertEqual(1, notifier.get_notification_count())
@mock.patch('cinder.volume.qos_specs.create',
side_effect=return_qos_specs_create)
def test_create_conflict(self, mock_qos_spec_create):
body = {"qos_specs": {"name": 'qos_spec_%s' % fake.ALREADY_EXISTS_ID,
"key1": "value1"}}
req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID)
notifier = fake_notifier.get_fake_notifier()
with mock.patch('cinder.rpc.get_notifier', return_value=notifier):
self.assertRaises(webob.exc.HTTPConflict,
self.controller.create, req, body)
self.assertEqual(1, notifier.get_notification_count())
@mock.patch('cinder.volume.qos_specs.create',
side_effect=return_qos_specs_create)
def test_create_failed(self, mock_qos_spec_create):
body = {"qos_specs": {"name": 'qos_spec_%s' % fake.ACTION_FAILED_ID,
"key1": "value1"}}
req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID)
notifier = fake_notifier.get_fake_notifier()
with mock.patch('cinder.rpc.get_notifier', return_value=notifier):
self.assertRaises(webob.exc.HTTPInternalServerError,
self.controller.create, req, body)
self.assertEqual(1, notifier.get_notification_count())
@ddt.data({'foo': {'a': 'b'}},
{'qos_specs': {'a': 'b'}},
{'qos_specs': 'string'},
None)
def test_create_invalid_body_bad_request(self, body):
req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID,
use_admin_context=True)
req.method = 'POST'
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
@ddt.data({'name': 'fake_name', 'a' * 256: 'a'},
{'name': 'fake_name', 'a': 'a' * 256},
{'name': 'fake_name', '': 'a'})
def test_create_qos_with_invalid_specs(self, value):
body = {'qos_specs': value}
req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID,
use_admin_context=True)
req.method = 'POST'
self.assertRaises(exception.InvalidInput,
self.controller.create, req, body)
@ddt.data({'name': None},
{'name': 'n' * 256},
{'name': ''},
{'name': ' '})
def test_create_qos_with_invalid_spec_name(self, value):
body = {'qos_specs': value}
req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID,
use_admin_context=True)
req.method = 'POST'
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
@mock.patch('cinder.volume.qos_specs.update',
side_effect=return_qos_specs_update)
def test_update(self, mock_qos_update):
notifier = fake_notifier.get_fake_notifier()
with mock.patch('cinder.rpc.get_notifier', return_value=notifier):
req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' %
(fake.PROJECT_ID, fake.QOS_SPEC_ID))
body = {'qos_specs': {'key1': 'value1',
'key2': 'value2'}}
res = self.controller.update(req, fake.QOS_SPEC_ID, body)
self.assertDictMatch(body, res)
self.assertEqual(1, notifier.get_notification_count())
@mock.patch('cinder.volume.qos_specs.update',
side_effect=return_qos_specs_update)
def test_update_not_found(self, mock_qos_update):
notifier = fake_notifier.get_fake_notifier()
with mock.patch('cinder.rpc.get_notifier', return_value=notifier):
req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' %
(fake.PROJECT_ID,
fake.WILL_NOT_BE_FOUND_ID))
body = {'qos_specs': {'key1': 'value1',
'key2': 'value2'}}
self.assertRaises(exception.QoSSpecsNotFound,
self.controller.update,
req, fake.WILL_NOT_BE_FOUND_ID, body)
self.assertEqual(1, notifier.get_notification_count())
@mock.patch('cinder.volume.qos_specs.update',
side_effect=return_qos_specs_update)
def test_update_invalid_input(self, mock_qos_update):
notifier = fake_notifier.get_fake_notifier()
with mock.patch('cinder.rpc.get_notifier', return_value=notifier):
req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' %
(fake.PROJECT_ID, fake.INVALID_ID))
body = {'qos_specs': {'key1': 'value1',
'key2': 'value2'}}
self.assertRaises(exception.InvalidQoSSpecs,
self.controller.update,
req, fake.INVALID_ID, body)
self.assertEqual(1, notifier.get_notification_count())
@mock.patch('cinder.volume.qos_specs.update',
side_effect=return_qos_specs_update)
def test_update_failed(self, mock_qos_update):
notifier = fake_notifier.get_fake_notifier()
with mock.patch('cinder.rpc.get_notifier', return_value=notifier):
req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' %
(fake.PROJECT_ID,
fake.UPDATE_FAILED_ID))
body = {'qos_specs': {'key1': 'value1',
'key2': 'value2'}}
self.assertRaises(webob.exc.HTTPInternalServerError,
self.controller.update,
req, fake.UPDATE_FAILED_ID, body)
self.assertEqual(1, notifier.get_notification_count())
@mock.patch('cinder.volume.qos_specs.get_qos_specs',
side_effect=return_qos_specs_get_qos_specs)
def test_show(self, mock_get_qos_specs):
req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' % (
fake.PROJECT_ID, fake.QOS_SPEC_ID))
res_dict = self.controller.show(req, fake.QOS_SPEC_ID)
self.assertEqual(fake.QOS_SPEC_ID, res_dict['qos_specs']['id'])
self.assertEqual('qos_specs_%s' % fake.QOS_SPEC_ID,
res_dict['qos_specs']['name'])
@mock.patch('cinder.volume.qos_specs.get_associations',
side_effect=return_get_qos_associations)
def test_get_associations(self, mock_get_assciations):
req = fakes.HTTPRequest.blank(
'/v2/%s/qos-specs/%s/associations' % (
fake.PROJECT_ID, fake.QOS_SPEC_ID))
res = self.controller.associations(req, fake.QOS_SPEC_ID)
self.assertEqual('FakeVolTypeName',
res['qos_associations'][0]['name'])
self.assertEqual(fake.VOLUME_TYPE_ID,
res['qos_associations'][0]['id'])
@mock.patch('cinder.volume.qos_specs.get_associations',
side_effect=return_get_qos_associations)
def test_get_associations_not_found(self, mock_get_assciations):
req = fakes.HTTPRequest.blank(
'/v2/%s/qos-specs/%s/associations' %
(fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID))
self.assertRaises(exception.QoSSpecsNotFound,
self.controller.associations,
req, fake.WILL_NOT_BE_FOUND_ID)
@mock.patch('cinder.volume.qos_specs.get_associations',
side_effect=return_get_qos_associations)
def test_get_associations_failed(self, mock_get_associations):
req = fakes.HTTPRequest.blank(
'/v2/%s/qos-specs/%s/associations' % (
fake.PROJECT_ID, fake.RAISE_ID))
self.assertRaises(webob.exc.HTTPInternalServerError,
self.controller.associations,
req, fake.RAISE_ID)
@mock.patch('cinder.volume.qos_specs.get_qos_specs',
side_effect=return_qos_specs_get_qos_specs)
@mock.patch('cinder.volume.qos_specs.associate_qos_with_type',
side_effect=return_associate_qos_specs)
def test_associate(self, mock_associate, mock_get_qos):
req = fakes.HTTPRequest.blank(
'/v2/%s/qos-specs/%s/associate?vol_type_id=%s' %
(fake.PROJECT_ID, fake.QOS_SPEC_ID, fake.VOLUME_TYPE_ID))
res = self.controller.associate(req, fake.QOS_SPEC_ID)
self.assertEqual(202, res.status_int)
@mock.patch('cinder.volume.qos_specs.get_qos_specs',
side_effect=return_qos_specs_get_qos_specs)
@mock.patch('cinder.volume.qos_specs.associate_qos_with_type',
side_effect=return_associate_qos_specs)
def test_associate_no_type(self, mock_associate, mock_get_qos):
req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s/associate' %
(fake.PROJECT_ID, fake.QOS_SPEC_ID))
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.associate, req, fake.QOS_SPEC_ID)
@mock.patch('cinder.volume.qos_specs.get_qos_specs',
side_effect=return_qos_specs_get_qos_specs)
@mock.patch('cinder.volume.qos_specs.associate_qos_with_type',
side_effect=return_associate_qos_specs)
def test_associate_not_found(self, mock_associate, mock_get_qos):
req = fakes.HTTPRequest.blank(
'/v2/%s/qos-specs/%s/associate?vol_type_id=%s' % (
fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID,
fake.VOLUME_TYPE_ID))
self.assertRaises(exception.QoSSpecsNotFound,
self.controller.associate, req,
fake.WILL_NOT_BE_FOUND_ID)
req = fakes.HTTPRequest.blank(
'/v2/%s/qos-specs/%s/associate?vol_type_id=%s' %
(fake.PROJECT_ID, fake.QOS_SPEC_ID, fake.WILL_NOT_BE_FOUND_ID))
self.assertRaises(exception.VolumeTypeNotFound,
self.controller.associate, req, fake.QOS_SPEC_ID)
@mock.patch('cinder.volume.qos_specs.get_qos_specs',
side_effect=return_qos_specs_get_qos_specs)
@mock.patch('cinder.volume.qos_specs.associate_qos_with_type',
side_effect=return_associate_qos_specs)
def test_associate_fail(self, mock_associate, mock_get_qos):
req = fakes.HTTPRequest.blank(
'/v2/%s/qos-specs/%s/associate?vol_type_id=%s' %
(fake.PROJECT_ID, fake.ACTION_FAILED_ID, fake.VOLUME_TYPE_ID))
self.assertRaises(webob.exc.HTTPInternalServerError,
self.controller.associate, req,
fake.ACTION_FAILED_ID)
@mock.patch('cinder.volume.qos_specs.get_qos_specs',
side_effect=return_qos_specs_get_qos_specs)
@mock.patch('cinder.volume.qos_specs.disassociate_qos_specs',
side_effect=return_associate_qos_specs)
def test_disassociate(self, mock_disassociate, mock_get_qos):
req = fakes.HTTPRequest.blank(
'/v2/%s/qos-specs/%s/disassociate?vol_type_id=%s' % (
fake.PROJECT_ID, fake.QOS_SPEC_ID, fake.VOLUME_TYPE_ID))
res = self.controller.disassociate(req, fake.QOS_SPEC_ID)
self.assertEqual(202, res.status_int)
@mock.patch('cinder.volume.qos_specs.get_qos_specs',
side_effect=return_qos_specs_get_qos_specs)
@mock.patch('cinder.volume.qos_specs.disassociate_qos_specs',
side_effect=return_associate_qos_specs)
def test_disassociate_no_type(self, mock_disassociate, mock_get_qos):
req = fakes.HTTPRequest.blank(
'/v2/%s/qos-specs/%s/disassociate' % (
fake.PROJECT_ID, fake.QOS_SPEC_ID))
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.disassociate, req, fake.QOS_SPEC_ID)
@mock.patch('cinder.volume.qos_specs.get_qos_specs',
side_effect=return_qos_specs_get_qos_specs)
@mock.patch('cinder.volume.qos_specs.disassociate_qos_specs',
side_effect=return_associate_qos_specs)
def test_disassociate_not_found(self, mock_disassociate, mock_get_qos):
req = fakes.HTTPRequest.blank(
'/v2/%s/qos-specs/%s/disassociate?vol_type_id=%s' % (
fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID,
fake.VOLUME_TYPE_ID))
self.assertRaises(exception.QoSSpecsNotFound,
self.controller.disassociate, req,
fake.WILL_NOT_BE_FOUND_ID)
req = fakes.HTTPRequest.blank(
'/v2/%s/qos-specs/%s/disassociate?vol_type_id=%s' %
(fake.PROJECT_ID, fake.VOLUME_TYPE_ID, fake.WILL_NOT_BE_FOUND_ID))
self.assertRaises(exception.VolumeTypeNotFound,
self.controller.disassociate, req,
fake.VOLUME_TYPE_ID)
@mock.patch('cinder.volume.qos_specs.get_qos_specs',
side_effect=return_qos_specs_get_qos_specs)
@mock.patch('cinder.volume.qos_specs.disassociate_qos_specs',
side_effect=return_associate_qos_specs)
def test_disassociate_failed(self, mock_disassociate, mock_get_qos):
req = fakes.HTTPRequest.blank(
'/v2/%s/qos-specs/%s/disassociate?vol_type_id=%s' % (
fake.PROJECT_ID, fake.ACTION2_FAILED_ID, fake.VOLUME_TYPE_ID))
self.assertRaises(webob.exc.HTTPInternalServerError,
self.controller.disassociate, req,
fake.ACTION2_FAILED_ID)
@mock.patch('cinder.volume.qos_specs.get_qos_specs',
side_effect=return_qos_specs_get_qos_specs)
@mock.patch('cinder.volume.qos_specs.disassociate_all',
side_effect=return_disassociate_all)
def test_disassociate_all(self, mock_disassociate, mock_get_qos):
req = fakes.HTTPRequest.blank(
'/v2/%s/qos-specs/%s/disassociate_all' % (
fake.PROJECT_ID, fake.QOS_SPEC_ID))
res = self.controller.disassociate_all(req, fake.QOS_SPEC_ID)
self.assertEqual(202, res.status_int)
@mock.patch('cinder.volume.qos_specs.get_qos_specs',
side_effect=return_qos_specs_get_qos_specs)
@mock.patch('cinder.volume.qos_specs.disassociate_all',
side_effect=return_disassociate_all)
def test_disassociate_all_not_found(self, mock_disassociate, mock_get):
req = fakes.HTTPRequest.blank(
'/v2/%s/qos-specs/%s/disassociate_all' % (
fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID))
self.assertRaises(exception.QoSSpecsNotFound,
self.controller.disassociate_all, req,
fake.WILL_NOT_BE_FOUND_ID)
@mock.patch('cinder.volume.qos_specs.get_qos_specs',
side_effect=return_qos_specs_get_qos_specs)
@mock.patch('cinder.volume.qos_specs.disassociate_all',
side_effect=return_disassociate_all)
def test_disassociate_all_failed(self, mock_disassociate, mock_get):
req = fakes.HTTPRequest.blank(
'/v2/%s/qos-specs/%s/disassociate_all' % (
fake.PROJECT_ID, fake.ACTION2_FAILED_ID))
self.assertRaises(webob.exc.HTTPInternalServerError,
self.controller.disassociate_all, req,
fake.ACTION2_FAILED_ID)
| |
# Copyright (c) 2009-2012, Andrew McNabb
# Copyright (c) 2013, Kristoffer Gronlund
from errno import EINTR
from subprocess import Popen, PIPE
import os
import signal
import sys
import time
import traceback
from psshlib import askpass_client
BUFFER_SIZE = 1 << 16
try:
bytes
except NameError:
bytes = str
class Task(object):
    """Starts a process and manages its input and output.
    Upon completion, the `exitstatus` attribute is set to the exit status
    of the process.
    """
    def __init__(self,
                 host,
                 port,
                 user,
                 cmd,
                 verbose=False,
                 quiet=False,
                 stdin=None,
                 print_out=False,
                 inline=False,
                 inline_stdout=False,
                 default_user=None):
        # Backwards compatibility:
        # Older callers passed an options object where ``verbose`` now
        # lives; detect that by type and unpack the individual flags.
        if not isinstance(verbose, bool):
            opts = verbose
            verbose = opts.verbose
            quiet = opts.quiet
            try:
                print_out = bool(opts.print_out)
            except AttributeError:
                print_out = False
            try:
                inline = bool(opts.inline)
            except AttributeError:
                inline = False
            try:
                inline_stdout = bool(opts.inline_stdout)
            except AttributeError:
                inline_stdout = False
            default_user = opts.user
        self.exitstatus = None
        self.host = host
        self.pretty_host = host
        self.port = port
        self.cmd = cmd
        # Build a human-readable "user@host:port" label, omitting the user
        # when it matches the default and the port when unset.
        if user and user != default_user:
            self.pretty_host = '@'.join((user, self.pretty_host))
        if port:
            self.pretty_host = ':'.join((self.pretty_host, port))
        self.proc = None
        self.writer = None
        self.timestamp = None
        self.failures = []
        self.killed = False
        self.inputbuffer = stdin
        self.byteswritten = 0
        # NOTE: ``bytes`` may be aliased to ``str`` above for Python 2.
        self.outputbuffer = bytes()
        self.errorbuffer = bytes()
        self.stdin = None
        self.stdout = None
        self.stderr = None
        self.outfile = None
        self.errfile = None
        # Set options.
        self.verbose = verbose
        self.quiet = quiet
        self.print_out = print_out
        self.inline = inline
        self.inline_stdout = inline_stdout
    def start(self, nodenum, iomap, writer, askpass_socket=None):
        """Starts the process and registers files with the IOMap."""
        self.writer = writer
        if writer:
            self.outfile, self.errfile = writer.open_files(self.pretty_host)
        # Set up the environment.
        environ = dict(os.environ)
        environ['PSSH_NODENUM'] = str(nodenum)
        environ['PSSH_HOST'] = self.host
        # Disable the GNOME pop-up password dialog and allow ssh to use
        # askpass.py to get a provided password.  If the module file is
        # askpass.pyc, we replace the extension.
        environ['SSH_ASKPASS'] = askpass_client.executable_path()
        if askpass_socket:
            environ['PSSH_ASKPASS_SOCKET'] = askpass_socket
        if self.verbose:
            environ['PSSH_ASKPASS_VERBOSE'] = '1'
        # Work around a mis-feature in ssh where it won't call SSH_ASKPASS
        # if DISPLAY is unset.
        if 'DISPLAY' not in environ:
            environ['DISPLAY'] = 'pssh-gibberish'
        # Create the subprocess.  Since we carefully call set_cloexec() on
        # all open files, we specify close_fds=False.
        # preexec_fn=os.setsid puts the child in its own session so that
        # _kill() can signal the whole process group.
        self.proc = Popen(self.cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE,
                          close_fds=False, preexec_fn=os.setsid, env=environ)
        self.timestamp = time.time()
        if self.inputbuffer:
            self.stdin = self.proc.stdin
            iomap.register_write(self.stdin.fileno(), self.handle_stdin)
        else:
            self.proc.stdin.close()
        self.stdout = self.proc.stdout
        iomap.register_read(self.stdout.fileno(), self.handle_stdout)
        self.stderr = self.proc.stderr
        iomap.register_read(self.stderr.fileno(), self.handle_stderr)
    def _kill(self):
        """Signals the process to terminate."""
        if self.proc:
            try:
                # Negative pid kills the entire process group (see setsid
                # in start()).
                os.kill(-self.proc.pid, signal.SIGKILL)
            except OSError:
                # If the kill fails, then just assume the process is dead.
                pass
            self.killed = True
    def timedout(self):
        """Kills the process and registers a timeout error."""
        if not self.killed:
            self._kill()
            self.failures.append('Timed out')
    def interrupted(self):
        """Kills the process and registers an keyboard interrupt error."""
        if not self.killed:
            self._kill()
            self.failures.append('Interrupted')
    def cancel(self):
        """Stops a task that has not started."""
        self.failures.append('Cancelled')
    def elapsed(self):
        """Finds the time in seconds since the process was started."""
        return time.time() - self.timestamp
    def running(self):
        """Finds if the process has terminated and saves the return code."""
        # Keep polling while any stream is still registered: we must drain
        # stdout/stderr before declaring the task finished.
        if self.stdin or self.stdout or self.stderr:
            return True
        if self.proc:
            self.exitstatus = self.proc.poll()
            if self.exitstatus is None:
                if self.killed:
                    # Set the exitstatus to what it would be if we waited.
                    self.exitstatus = -signal.SIGKILL
                    return False
                else:
                    return True
            else:
                if self.exitstatus < 0:
                    message = 'Killed by signal %s' % (-self.exitstatus)
                    self.failures.append(message)
                elif self.exitstatus > 0:
                    message = 'Exited with error code %s' % self.exitstatus
                    self.failures.append(message)
                self.proc = None
                return False
    def handle_stdin(self, fd, iomap):
        """Called when the process's standard input is ready for writing."""
        try:
            start = self.byteswritten
            if start < len(self.inputbuffer):
                # Write at most BUFFER_SIZE bytes per callback; os.write may
                # accept fewer, so track actual progress in byteswritten.
                chunk = self.inputbuffer[start:start+BUFFER_SIZE]
                self.byteswritten = start + os.write(fd, chunk)
            else:
                self.close_stdin(iomap)
        except (OSError, IOError):
            _, e, _ = sys.exc_info()
            # EINTR means "retry on the next callback"; anything else is
            # fatal for this stream.
            if e.errno != EINTR:
                self.close_stdin(iomap)
                self.log_exception(e)
    def close_stdin(self, iomap):
        # Unregister from the event loop before closing the fd.
        if self.stdin:
            iomap.unregister(self.stdin.fileno())
            self.stdin.close()
            self.stdin = None
    def handle_stdout(self, fd, iomap):
        """Called when the process's standard output is ready for reading."""
        try:
            buf = os.read(fd, BUFFER_SIZE)
            if buf:
                if self.inline or self.inline_stdout:
                    # NOTE(review): mixes a str format with byte buffers;
                    # this path appears Python 2 only -- confirm before
                    # running under Python 3.
                    if self.quiet:
                        self.outputbuffer += "%s: %s" % (self.host, buf)
                    else:
                        self.outputbuffer += buf
                if self.outfile:
                    self.writer.write(self.outfile, buf)
                if self.print_out:
                    # NOTE(review): buf.split('\n') assumes str chunks;
                    # Python 2 only -- verify.
                    for l in buf.split('\n'):
                        sys.stdout.write("%s: %s\n" % (self.host, l))
            else:
                # Empty read means EOF on this stream.
                self.close_stdout(iomap)
        except (OSError, IOError):
            _, e, _ = sys.exc_info()
            if e.errno != EINTR:
                self.close_stdout(iomap)
                self.log_exception(e)
    def close_stdout(self, iomap):
        if self.stdout:
            iomap.unregister(self.stdout.fileno())
            self.stdout.close()
            self.stdout = None
        if self.outfile:
            self.writer.close(self.outfile)
            self.outfile = None
    def handle_stderr(self, fd, iomap):
        """Called when the process's standard error is ready for reading."""
        try:
            buf = os.read(fd, BUFFER_SIZE)
            if buf:
                if self.inline:
                    self.errorbuffer += buf
                if self.errfile:
                    self.writer.write(self.errfile, buf)
            else:
                # Empty read means EOF on this stream.
                self.close_stderr(iomap)
        except (OSError, IOError):
            _, e, _ = sys.exc_info()
            if e.errno != EINTR:
                self.close_stderr(iomap)
                self.log_exception(e)
    def close_stderr(self, iomap):
        if self.stderr:
            iomap.unregister(self.stderr.fileno())
            self.stderr.close()
            self.stderr = None
        if self.errfile:
            self.writer.close(self.errfile)
            self.errfile = None
    def log_exception(self, e):
        """Saves a record of the most recent exception for error reporting."""
        if self.verbose:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            exc = ("Exception: %s, %s, %s" %
                   (exc_type, exc_value, traceback.format_tb(exc_traceback)))
        else:
            exc = str(e)
        self.failures.append(exc)
# vim:ts=4:sw=4:et:
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from datetime import timedelta
import pytest
from sqlalchemy.orm import eagerload
from airflow import models
from airflow.api.common.experimental.mark_tasks import (
_create_dagruns,
set_dag_run_state_to_failed,
set_dag_run_state_to_running,
set_dag_run_state_to_success,
set_state,
)
from airflow.models import DagRun
from airflow.utils import timezone
from airflow.utils.dates import days_ago
from airflow.utils.session import create_session, provide_session
from airflow.utils.state import State
from airflow.utils.types import DagRunType
from tests.test_utils.db import clear_db_runs
DEV_NULL = "/dev/null"
@pytest.fixture(scope="module")
def dagbag():
    """Provide a DagBag read from the DB, synced from disk first."""
    from airflow.models.dagbag import DagBag

    # Sync the on-disk DAGs so the DB copies are current before reading.
    DagBag(read_dags_from_db=False, include_examples=False).sync_to_db()
    return DagBag(read_dags_from_db=True)
class TestMarkTasks:
    """Tests for set_state(): marking task instances across upstream,
    downstream, future, past and subdag dimensions."""
    @pytest.fixture(scope="class", autouse=True, name="create_dags")
    @classmethod
    def create_dags(cls, dagbag):
        # Load the example DAGs used throughout this class and precompute
        # the execution dates the tests reference.
        cls.dag1 = dagbag.get_dag('miscellaneous_test_dag')
        cls.dag2 = dagbag.get_dag('example_subdag_operator')
        cls.dag3 = dagbag.get_dag('example_trigger_target_dag')
        cls.execution_dates = [days_ago(2), days_ago(1)]
        start_date3 = cls.dag3.start_date
        # dag3 has schedule_interval None, so dates are built manually.
        cls.dag3_execution_dates = [
            start_date3,
            start_date3 + timedelta(days=1),
            start_date3 + timedelta(days=2),
        ]
    @pytest.fixture(autouse=True)
    def setup(self):
        # Create fresh DagRuns for each test and tear them down afterwards.
        clear_db_runs()
        drs = _create_dagruns(
            self.dag1, self.execution_dates, state=State.RUNNING, run_type=DagRunType.SCHEDULED
        )
        for dr in drs:
            dr.dag = self.dag1
        drs = _create_dagruns(
            self.dag2, [self.dag2.start_date], state=State.RUNNING, run_type=DagRunType.SCHEDULED
        )
        for dr in drs:
            dr.dag = self.dag2
        drs = _create_dagruns(
            self.dag3, self.dag3_execution_dates, state=State.SUCCESS, run_type=DagRunType.MANUAL
        )
        for dr in drs:
            dr.dag = self.dag3
        yield
        clear_db_runs()
    @staticmethod
    def snapshot_state(dag, execution_dates):
        # Return the current TaskInstances (with their dag_run eagerly
        # loaded) so later assertions can compare against the prior state.
        TI = models.TaskInstance
        DR = models.DagRun
        with create_session() as session:
            return (
                session.query(TI)
                .join(TI.dag_run)
                .options(eagerload(TI.dag_run))
                .filter(TI.dag_id == dag.dag_id, DR.execution_date.in_(execution_dates))
                .all()
            )
    @provide_session
    def verify_state(self, dag, task_ids, execution_dates, state, old_tis, session=None):
        # Assert that the targeted (task_id, execution_date) pairs are in
        # ``state`` while all other task instances kept their old state.
        TI = models.TaskInstance
        DR = models.DagRun
        tis = (
            session.query(TI)
            .join(TI.dag_run)
            .options(eagerload(TI.dag_run))
            .filter(TI.dag_id == dag.dag_id, DR.execution_date.in_(execution_dates))
            .all()
        )
        assert len(tis) > 0
        for ti in tis:
            assert ti.operator == dag.get_task(ti.task_id).task_type
            if ti.task_id in task_ids and ti.execution_date in execution_dates:
                assert ti.state == state
                if state in State.finished:
                    # Finished states must have an end_date recorded.
                    assert ti.end_date is not None
            else:
                for old_ti in old_tis:
                    if old_ti.task_id == ti.task_id and old_ti.execution_date == ti.execution_date:
                        assert ti.state == old_ti.state
    def test_mark_tasks_now(self):
        # set one task to success but do not commit
        snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
        task = self.dag1.get_task("runme_1")
        altered = set_state(
            tasks=[task],
            execution_date=self.execution_dates[0],
            upstream=False,
            downstream=False,
            future=False,
            past=False,
            state=State.SUCCESS,
            commit=False,
        )
        assert len(altered) == 1
        self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]], None, snapshot)
        # set one and only one task to success
        altered = set_state(
            tasks=[task],
            execution_date=self.execution_dates[0],
            upstream=False,
            downstream=False,
            future=False,
            past=False,
            state=State.SUCCESS,
            commit=True,
        )
        assert len(altered) == 1
        self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]], State.SUCCESS, snapshot)
        # set no tasks
        altered = set_state(
            tasks=[task],
            execution_date=self.execution_dates[0],
            upstream=False,
            downstream=False,
            future=False,
            past=False,
            state=State.SUCCESS,
            commit=True,
        )
        assert len(altered) == 0
        self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]], State.SUCCESS, snapshot)
        # set task to other than success
        altered = set_state(
            tasks=[task],
            execution_date=self.execution_dates[0],
            upstream=False,
            downstream=False,
            future=False,
            past=False,
            state=State.FAILED,
            commit=True,
        )
        assert len(altered) == 1
        self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]], State.FAILED, snapshot)
        # don't alter other tasks
        snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
        task = self.dag1.get_task("runme_0")
        altered = set_state(
            tasks=[task],
            execution_date=self.execution_dates[0],
            upstream=False,
            downstream=False,
            future=False,
            past=False,
            state=State.SUCCESS,
            commit=True,
        )
        assert len(altered) == 1
        self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]], State.SUCCESS, snapshot)
        # set one task as FAILED. dag3 has schedule_interval None
        snapshot = TestMarkTasks.snapshot_state(self.dag3, self.dag3_execution_dates)
        task = self.dag3.get_task("run_this")
        altered = set_state(
            tasks=[task],
            execution_date=self.dag3_execution_dates[1],
            upstream=False,
            downstream=False,
            future=False,
            past=False,
            state=State.FAILED,
            commit=True,
        )
        # exactly one TaskInstance should have been altered
        assert len(altered) == 1
        # task should have been marked as failed
        self.verify_state(self.dag3, [task.task_id], [self.dag3_execution_dates[1]], State.FAILED, snapshot)
        # tasks on other days should be unchanged
        self.verify_state(self.dag3, [task.task_id], [self.dag3_execution_dates[0]], None, snapshot)
        self.verify_state(self.dag3, [task.task_id], [self.dag3_execution_dates[2]], None, snapshot)
    def test_mark_downstream(self):
        # test downstream
        snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
        task = self.dag1.get_task("runme_1")
        relatives = task.get_flat_relatives(upstream=False)
        task_ids = [t.task_id for t in relatives]
        task_ids.append(task.task_id)
        altered = set_state(
            tasks=[task],
            execution_date=self.execution_dates[0],
            upstream=False,
            downstream=True,
            future=False,
            past=False,
            state=State.SUCCESS,
            commit=True,
        )
        assert len(altered) == 3
        self.verify_state(self.dag1, task_ids, [self.execution_dates[0]], State.SUCCESS, snapshot)
    def test_mark_upstream(self):
        # test upstream
        snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
        task = self.dag1.get_task("run_after_loop")
        relatives = task.get_flat_relatives(upstream=True)
        task_ids = [t.task_id for t in relatives]
        task_ids.append(task.task_id)
        altered = set_state(
            tasks=[task],
            execution_date=self.execution_dates[0],
            upstream=True,
            downstream=False,
            future=False,
            past=False,
            state=State.SUCCESS,
            commit=True,
        )
        assert len(altered) == 4
        self.verify_state(self.dag1, task_ids, [self.execution_dates[0]], State.SUCCESS, snapshot)
    def test_mark_tasks_future(self):
        # set one task to success towards end of scheduled dag runs
        snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
        task = self.dag1.get_task("runme_1")
        altered = set_state(
            tasks=[task],
            execution_date=self.execution_dates[0],
            upstream=False,
            downstream=False,
            future=True,
            past=False,
            state=State.SUCCESS,
            commit=True,
        )
        assert len(altered) == 2
        self.verify_state(self.dag1, [task.task_id], self.execution_dates, State.SUCCESS, snapshot)
        snapshot = TestMarkTasks.snapshot_state(self.dag3, self.dag3_execution_dates)
        task = self.dag3.get_task("run_this")
        altered = set_state(
            tasks=[task],
            execution_date=self.dag3_execution_dates[1],
            upstream=False,
            downstream=False,
            future=True,
            past=False,
            state=State.FAILED,
            commit=True,
        )
        assert len(altered) == 2
        # The run before the anchor date must be untouched.
        self.verify_state(self.dag3, [task.task_id], [self.dag3_execution_dates[0]], None, snapshot)
        self.verify_state(self.dag3, [task.task_id], self.dag3_execution_dates[1:], State.FAILED, snapshot)
    def test_mark_tasks_past(self):
        # set one task to success towards end of scheduled dag runs
        snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
        task = self.dag1.get_task("runme_1")
        altered = set_state(
            tasks=[task],
            execution_date=self.execution_dates[1],
            upstream=False,
            downstream=False,
            future=False,
            past=True,
            state=State.SUCCESS,
            commit=True,
        )
        assert len(altered) == 2
        self.verify_state(self.dag1, [task.task_id], self.execution_dates, State.SUCCESS, snapshot)
        snapshot = TestMarkTasks.snapshot_state(self.dag3, self.dag3_execution_dates)
        task = self.dag3.get_task("run_this")
        altered = set_state(
            tasks=[task],
            execution_date=self.dag3_execution_dates[1],
            upstream=False,
            downstream=False,
            future=False,
            past=True,
            state=State.FAILED,
            commit=True,
        )
        assert len(altered) == 2
        self.verify_state(self.dag3, [task.task_id], self.dag3_execution_dates[:2], State.FAILED, snapshot)
        # The run after the anchor date must be untouched.
        self.verify_state(self.dag3, [task.task_id], [self.dag3_execution_dates[2]], None, snapshot)
    def test_mark_tasks_multiple(self):
        # set multiple tasks to success
        snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
        tasks = [self.dag1.get_task("runme_1"), self.dag1.get_task("runme_2")]
        altered = set_state(
            tasks=tasks,
            execution_date=self.execution_dates[0],
            upstream=False,
            downstream=False,
            future=False,
            past=False,
            state=State.SUCCESS,
            commit=True,
        )
        assert len(altered) == 2
        self.verify_state(
            self.dag1, [task.task_id for task in tasks], [self.execution_dates[0]], State.SUCCESS, snapshot
        )
    # TODO: this backend should be removed once a fixing solution is found later
    # We skip it here because this test case is working with Postgres & SQLite
    # but not with MySQL
    @pytest.mark.backend("sqlite", "postgres")
    def test_mark_tasks_subdag(self):
        # set one task to success towards end of scheduled dag runs
        task = self.dag2.get_task("section-1")
        relatives = task.get_flat_relatives(upstream=False)
        task_ids = [t.task_id for t in relatives]
        task_ids.append(task.task_id)
        altered = set_state(
            tasks=[task],
            execution_date=self.execution_dates[0],
            upstream=False,
            downstream=True,
            future=False,
            past=False,
            state=State.SUCCESS,
            commit=True,
        )
        assert len(altered) == 14
        # cannot use snapshot here as that will require drilling down the
        # sub dag tree essentially recreating the same code as in the
        # tested logic.
        self.verify_state(self.dag2, task_ids, [self.execution_dates[0]], State.SUCCESS, [])
class TestMarkDAGRun(unittest.TestCase):
    # Initial task states used to seed each created DagRun; one task per
    # state of interest so the "number altered" assertions below are exact.
    INITIAL_TASK_STATES = {
        'runme_0': State.SUCCESS,
        'runme_1': State.SKIPPED,
        'runme_2': State.UP_FOR_RETRY,
        'also_run_this': State.QUEUED,
        'run_after_loop': State.RUNNING,
        'run_this_last': State.FAILED,
    }
@classmethod
def setUpClass(cls):
dagbag = models.DagBag(include_examples=True, read_dags_from_db=False)
cls.dag1 = dagbag.dags['miscellaneous_test_dag']
cls.dag1.sync_to_db()
cls.dag2 = dagbag.dags['example_subdag_operator']
cls.dag2.sync_to_db()
cls.execution_dates = [days_ago(2), days_ago(1), days_ago(0)]
    def setUp(self):
        # Start each test from a clean slate of DagRuns/TaskInstances.
        clear_db_runs()
def _get_num_tasks_with_starting_state(self, state: State, inclusion: bool):
"""
If ``inclusion=True``, get num tasks with initial state ``state``.
Otherwise, get number tasks with initial state not equal to ``state``
:param state: State to compare against
:param inclusion: whether to look for inclusion or exclusion
:return: number of tasks meeting criteria
"""
states = self.INITIAL_TASK_STATES.values()
def compare(x, y):
return x == y if inclusion else x != y
return len([s for s in states if compare(s, state)])
    def _set_default_task_instance_states(self, dr):
        # Seed every TI of the run with its canonical starting state.
        for task_id, state in self.INITIAL_TASK_STATES.items():
            dr.get_task_instance(task_id).set_state(state)
    def _verify_task_instance_states_remain_default(self, dr):
        # Assert no TI drifted from the seeded starting state.
        for task_id, state in self.INITIAL_TASK_STATES.items():
            assert dr.get_task_instance(task_id).state == state
@provide_session
def _verify_task_instance_states(self, dag, date, state, session=None):
TI = models.TaskInstance
tis = session.query(TI).filter(TI.dag_id == dag.dag_id, TI.execution_date == date)
for ti in tis:
assert ti.state == state
def _create_test_dag_run(self, state, date):
return self.dag1.create_dagrun(
run_type=DagRunType.MANUAL, state=state, start_date=date, execution_date=date
)
def _verify_dag_run_state(self, dag, date, state):
drs = models.DagRun.find(dag_id=dag.dag_id, execution_date=date)
dr = drs[0]
assert dr.get_state() == state
    @provide_session
    def _verify_dag_run_dates(self, dag, date, state, middle_time, session=None):
        # When target state is RUNNING, we should set start_date,
        # otherwise we should set end_date.
        # ``middle_time`` was captured between run creation and the state
        # change, so comparisons against it prove which timestamp moved.
        DR = DagRun
        dr = session.query(DR).filter(DR.dag_id == dag.dag_id, DR.execution_date == date).one()
        if state == State.RUNNING:
            # Since the DAG is running, the start_date must be updated after creation
            assert dr.start_date > middle_time
            # If the dag is still running, we don't have an end date
            assert dr.end_date is None
        else:
            # If the dag is not running, there must be an end time
            assert dr.start_date < middle_time
            assert dr.end_date > middle_time
def test_set_running_dag_run_to_success(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.RUNNING, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_success(self.dag1, date, commit=True)
# All except the SUCCESS task should be altered.
expected = self._get_num_tasks_with_starting_state(State.SUCCESS, inclusion=False)
assert len(altered) == expected
self._verify_dag_run_state(self.dag1, date, State.SUCCESS)
self._verify_task_instance_states(self.dag1, date, State.SUCCESS)
self._verify_dag_run_dates(self.dag1, date, State.SUCCESS, middle_time)
def test_set_running_dag_run_to_failed(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.RUNNING, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_failed(self.dag1, date, commit=True)
# Only running task should be altered.
expected = self._get_num_tasks_with_starting_state(State.RUNNING, inclusion=True)
assert len(altered) == expected
self._verify_dag_run_state(self.dag1, date, State.FAILED)
assert dr.get_task_instance('run_after_loop').state == State.FAILED
self._verify_dag_run_dates(self.dag1, date, State.FAILED, middle_time)
def test_set_running_dag_run_to_running(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.RUNNING, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_running(self.dag1, date, commit=True)
# None of the tasks should be altered, only the dag itself
assert len(altered) == 0
self._verify_dag_run_state(self.dag1, date, State.RUNNING)
self._verify_task_instance_states_remain_default(dr)
self._verify_dag_run_dates(self.dag1, date, State.RUNNING, middle_time)
def test_set_success_dag_run_to_success(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.SUCCESS, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_success(self.dag1, date, commit=True)
# All except the SUCCESS task should be altered.
expected = self._get_num_tasks_with_starting_state(State.SUCCESS, inclusion=False)
assert len(altered) == expected
self._verify_dag_run_state(self.dag1, date, State.SUCCESS)
self._verify_task_instance_states(self.dag1, date, State.SUCCESS)
self._verify_dag_run_dates(self.dag1, date, State.SUCCESS, middle_time)
def test_set_success_dag_run_to_failed(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.SUCCESS, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_failed(self.dag1, date, commit=True)
# Only running task should be altered.
expected = self._get_num_tasks_with_starting_state(State.RUNNING, inclusion=True)
assert len(altered) == expected
self._verify_dag_run_state(self.dag1, date, State.FAILED)
assert dr.get_task_instance('run_after_loop').state == State.FAILED
self._verify_dag_run_dates(self.dag1, date, State.FAILED, middle_time)
def test_set_success_dag_run_to_running(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.SUCCESS, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_running(self.dag1, date, commit=True)
# None of the tasks should be altered, but only the dag object should be changed
assert len(altered) == 0
self._verify_dag_run_state(self.dag1, date, State.RUNNING)
self._verify_task_instance_states_remain_default(dr)
self._verify_dag_run_dates(self.dag1, date, State.RUNNING, middle_time)
def test_set_failed_dag_run_to_success(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.SUCCESS, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_success(self.dag1, date, commit=True)
# All except the SUCCESS task should be altered.
expected = self._get_num_tasks_with_starting_state(State.SUCCESS, inclusion=False)
assert len(altered) == expected
self._verify_dag_run_state(self.dag1, date, State.SUCCESS)
self._verify_task_instance_states(self.dag1, date, State.SUCCESS)
self._verify_dag_run_dates(self.dag1, date, State.SUCCESS, middle_time)
def test_set_failed_dag_run_to_failed(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.SUCCESS, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_failed(self.dag1, date, commit=True)
# Only running task should be altered.
expected = self._get_num_tasks_with_starting_state(State.RUNNING, inclusion=True)
assert len(altered) == expected
self._verify_dag_run_state(self.dag1, date, State.FAILED)
assert dr.get_task_instance('run_after_loop').state == State.FAILED
self._verify_dag_run_dates(self.dag1, date, State.FAILED, middle_time)
def test_set_failed_dag_run_to_running(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.SUCCESS, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_running(self.dag1, date, commit=True)
# None of the tasks should be altered, since we've only altered the DAG itself
assert len(altered) == 0
self._verify_dag_run_state(self.dag1, date, State.RUNNING)
self._verify_task_instance_states_remain_default(dr)
self._verify_dag_run_dates(self.dag1, date, State.RUNNING, middle_time)
def test_set_state_without_commit(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.RUNNING, date)
self._set_default_task_instance_states(dr)
will_be_altered = set_dag_run_state_to_running(self.dag1, date, commit=False)
# None of the tasks will be altered.
assert len(will_be_altered) == 0
self._verify_dag_run_state(self.dag1, date, State.RUNNING)
self._verify_task_instance_states_remain_default(dr)
will_be_altered = set_dag_run_state_to_failed(self.dag1, date, commit=False)
# Only the running task should be altered.
expected = self._get_num_tasks_with_starting_state(State.RUNNING, inclusion=True)
assert len(will_be_altered) == expected
self._verify_dag_run_state(self.dag1, date, State.RUNNING)
self._verify_task_instance_states_remain_default(dr)
will_be_altered = set_dag_run_state_to_success(self.dag1, date, commit=False)
# All except the SUCCESS task should be altered.
expected = self._get_num_tasks_with_starting_state(State.SUCCESS, inclusion=False)
assert len(will_be_altered) == expected
self._verify_dag_run_state(self.dag1, date, State.RUNNING)
self._verify_task_instance_states_remain_default(dr)
@provide_session
def test_set_state_with_multiple_dagruns(self, session=None):
self.dag2.create_dagrun(
run_type=DagRunType.MANUAL,
state=State.FAILED,
execution_date=self.execution_dates[0],
session=session,
)
self.dag2.create_dagrun(
run_type=DagRunType.MANUAL,
state=State.FAILED,
execution_date=self.execution_dates[1],
session=session,
)
self.dag2.create_dagrun(
run_type=DagRunType.MANUAL,
state=State.RUNNING,
execution_date=self.execution_dates[2],
session=session,
)
altered = set_dag_run_state_to_success(self.dag2, self.execution_dates[1], commit=True)
# Recursively count number of tasks in the dag
def count_dag_tasks(dag):
count = len(dag.tasks)
subdag_counts = [count_dag_tasks(subdag) for subdag in dag.subdags]
count += sum(subdag_counts)
return count
assert len(altered) == count_dag_tasks(self.dag2)
self._verify_dag_run_state(self.dag2, self.execution_dates[1], State.SUCCESS)
# Make sure other dag status are not changed
models.DagRun.find(dag_id=self.dag2.dag_id, execution_date=self.execution_dates[0])
self._verify_dag_run_state(self.dag2, self.execution_dates[0], State.FAILED)
models.DagRun.find(dag_id=self.dag2.dag_id, execution_date=self.execution_dates[2])
self._verify_dag_run_state(self.dag2, self.execution_dates[2], State.RUNNING)
def test_set_dag_run_state_edge_cases(self):
# Dag does not exist
altered = set_dag_run_state_to_success(None, self.execution_dates[0])
assert len(altered) == 0
altered = set_dag_run_state_to_failed(None, self.execution_dates[0])
assert len(altered) == 0
altered = set_dag_run_state_to_running(None, self.execution_dates[0])
assert len(altered) == 0
# Invalid execution date
altered = set_dag_run_state_to_success(self.dag1, None)
assert len(altered) == 0
altered = set_dag_run_state_to_failed(self.dag1, None)
assert len(altered) == 0
altered = set_dag_run_state_to_running(self.dag1, None)
assert len(altered) == 0
# This will throw ValueError since dag.latest_execution_date
# need to be 0 does not exist.
with pytest.raises(ValueError):
set_dag_run_state_to_success(self.dag2, timezone.make_naive(self.execution_dates[0]))
# altered = set_dag_run_state_to_success(self.dag1, self.execution_dates[0])
# DagRun does not exist
# This will throw ValueError since dag.latest_execution_date does not exist
with pytest.raises(ValueError):
set_dag_run_state_to_success(self.dag2, self.execution_dates[0])
def test_set_dag_run_state_to_failed_no_running_tasks(self):
"""
set_dag_run_state_to_failed when there are no running tasks to update
"""
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.SUCCESS, date)
for task in self.dag1.tasks:
dr.get_task_instance(task.task_id).set_state(State.SUCCESS)
set_dag_run_state_to_failed(self.dag1, date)
def tearDown(self):
self.dag1.clear()
self.dag2.clear()
with create_session() as session:
session.query(models.DagRun).delete()
session.query(models.TaskInstance).delete()
| |
import argparse
import datetime
import imutils
import numpy as np
import time
import csv
import cv2
import os.path
import math
from PyQt4.QtCore import QObject, pyqtSignal, pyqtSlot
# Module-level mouse/selection state shared with the cv2 mouse callback below.
xyreturn = None  # (x, y) of the last double-click; None until one happens
switch = 0  # 1 while the left mouse button is held down (drag in progress)
crp_lst = []  # points collected during a drag; element 0 is the anchor corner
p1 = (0,0)  # top-left of the selected region, as fractions of the frame size
p2 = (1,1)  # bottom-right of the selected region, as fractions of the frame size
geo = 0  # toggle for the (disabled) circular-selection mode in divide_frame
def divide_frame(event,x,y,flags,param):
    """cv2 mouse callback: record drag rectangles and double-clicks.

    Mutates the module globals: a left-button drag collects positions into
    ``crp_lst`` (the first element is the anchor corner), and a double-click
    stores its position in ``xyreturn``, which ends the loop in ``fix_point``.
    """
    global xyreturn, switch, crp_lst, geo
    if event == cv2.EVENT_LBUTTONDOWN:
        # Start a new drag: remember the anchor corner.
        switch = 1
        crp_lst = [(x,y)]
    elif event == cv2.EVENT_LBUTTONUP:
        # Drag finished: keep the final corner.
        switch = 0
        crp_lst.append((x,y))
    if event == cv2.EVENT_LBUTTONDBLCLK:
        xyreturn = (x,y)
    if event == cv2.EVENT_MOUSEMOVE and switch == 1:
        # While dragging, record every intermediate position.
        crp_lst.append((x,y))
    # (Disabled) right-click circular-selection mode, kept for reference:
    # if event == cv2.EVENT_RBUTTONDOWN:
    #     if geo == 0:
    #         geo = 1
    #         crp_lst = [(x,y)]
    #     else:
    #         geo = geo - 1
    #         crp_lst = [(x,y)]
def set_input(videofile):
    """Return a cv2.VideoCapture for *videofile*, or the webcam if None.

    Bug fix: the warm-up ``time.sleep(0.25)`` previously sat *after* the
    ``return`` statement and was unreachable; it now runs between opening
    the webcam and returning it, giving the sensor time to initialize.
    """
    # if the video argument is None, then we are reading from webcam
    if videofile is None:
        capture = cv2.VideoCapture(0)
        time.sleep(0.25)  # give the webcam a moment to warm up
        return capture
    else:
        return cv2.VideoCapture(videofile)
def fix_point(capture):
    """Interactively pick a crop region and a reference point on the video.

    Shows frames in a "FlumeView" window wired to ``divide_frame``: a
    left-button drag previews the crop rectangle, pressing ``c`` commits it
    into the normalized globals ``p1``/``p2``, and a double-click sets
    ``xyreturn`` which ends the loop.  Releases the capture before
    returning.

    Returns the double-clicked point normalized to the frame size,
    i.e. (x/width, y/height).
    """
    global xyreturn,switch,p1,p2
    (grabbed,frame) = capture.read()
    #frame = frame[pt1y:pt2y,pt1x:pt2x]
    height,width,channel = frame.shape
    xyreturn=None
    cv2.namedWindow("FlumeView")
    cv2.setMouseCallback("FlumeView",divide_frame)
    # Keep redrawing until the callback records a double-click.
    while xyreturn == None:
        (grabbed,frame) = capture.read()
        key = cv2.waitKey(30) & 0xFF
        if len(crp_lst) >= 1 and geo != 1:
            # Preview the crop rectangle spanned by the drag's corners.
            cv2.rectangle(frame,min(crp_lst),crp_lst[-1],(0,0,255),2)
        # (Disabled) circular-selection preview, kept for reference:
        #     geo = 0
        # if len(crp_lst) >= 1 and geo == 1:
        #     a = ((crp_lst[-1][0]-min(crp_lst)[0])^2)/float(width)
        #     b = ((crp_lst[-1][1]-min(crp_lst)[1])^2)/float(height)
        #     c = math.sqrt(a+b)
        #     r = math.hypot((crp_lst[-1][0]-min(crp_lst)[0]),(crp_lst[-1][1]-min(crp_lst)[1]))
        #     d = 2*math.pi*r
        #     cv2.circle(frame,min(crp_lst),int(r),(0,0,255),2)
        #     print(a,b,c,d,r)
        # else:
        #     crp_lst.append((0,0))
        #     crp_lst.append((int(width),int(height)))
        cv2.imshow("FlumeView",frame)
        if key == ord("c"):
            # Commit the crop: store corners as fractions of the frame size.
            p1 = min(crp_lst)[0]/float(width),min(crp_lst)[1]/float(height)
            p2 = crp_lst[-1][0]/float(width),crp_lst[-1][1]/float(height)
            # p1 =(min(crp_lst)[0]/float(width),min(crp_lst)[1]/float(height))
            # p2 =(crp_lst[-1][0]/float(width),crp_lst[-1][1]/float(height))
            # cv2.waitKey(30)
    if xyreturn != None:
        cv2.destroyWindow("FlumeView")
        capture.release()
    #print(xyreturn[1]/width)
    return (float(xyreturn[0])/float(width),float(xyreturn[1])/float(height))
# The "analyser" class below encapsulates the motion-tracking calculations.
class analyser(QObject):
    """Background-subtraction motion tracker for an animal in a flume video.

    Reads frames from a file or webcam, diffs each frame against the first
    captured frame to find the moving animal, and emits Qt signals with its
    normalized position so a GUI can plot and record it.

    NOTE(review): written against the OpenCV 2.x API
    (``cv2.cv.CV_CAP_PROP_FPS``, two-value ``findContours``) and PyQt4 —
    confirm the installed library versions.
    """
    # Signals: position sample (x, y, frame number), new-frame notification,
    # whether the measurement phase has started, and the raw frame counter.
    newData = pyqtSignal(float,float,int,name='newData')
    newFrame = pyqtSignal(int,name='newFrame')
    countSig = pyqtSignal(bool,name='countSignal')
    framecount = pyqtSignal(int,name='framecount')
    #frameshape = pyqtSignal(int,int,name='frameshape')
    def __init__(self,videofile,x,y,wait,min_area,timelimit,refresh,show):
        """Store the analysis parameters and open the capture source.

        videofile -- video path, or None for the default webcam
        x, y      -- divide-line positions as fractions of the frame size
        wait      -- seconds of video to skip before counting starts
        min_area  -- minimum contour area (pixels) accepted as the animal
        timelimit -- abort the analysis after this many seconds of video
        refresh   -- stored but not read in this class (TODO confirm use)
        show      -- if True, display a live "FlumeView - Live" window
        """
        QObject.__init__(self)
        self.capture = self.set_input(videofile)
        self.divide_x = x
        self.divide_y = y
        self.wait = wait
        self.firstFrame = None
        self.min_area = min_area
        self.timelimit = timelimit
        self.refresh = refresh
        self.show = show
        self.lastframe = None
    def set_input(self, videofile):
        """Get capture of video file.If not defined, return Webcam output """
        # if the video argument is None, then we are reading from webcam
        if videofile is None:
            return cv2.VideoCapture(0)
            #time.sleep(0.25)
        else:
            return cv2.VideoCapture(videofile)
    def start(self):
        """Run the tracking loop until the video ends or timelimit is hit.

        Returns (-1, -1) on exit.  Emits ``framecount``/``countSig`` every
        frame and ``newData`` for every accepted contour once counting has
        started (i.e. after ``wait`` seconds of video).
        """
        #cv2.namedWindow("Security Feed")
        self.fps = self.capture.get(cv2.cv.CV_CAP_PROP_FPS)
        self.frame_count = 0
        # NOTE(review): this stores the *type* bool, not a value — looks
        # unintended; the local ``count_start`` below is what is actually used.
        self.count_start = bool
        self.trace_xy = []  # normalized (x, y) history of the animal
        self.dist = []  # rolling window of inter-frame travel distances
        #self.firstFrame = None
        #
        # if self.firstFrame is None:
        #     self.firstFrame = gray
        while True:
            # Stop once the configured amount of video has been processed.
            if (self.frame_count/self.fps)>self.timelimit:
                self.capture.release()
                return(-1,-1)
            else:
                # grab the current frame and initialize the occupied/unoccupied
                # text
                (grabbed, self.frame) = self.capture.read()
                #qt_image = QtWidgets.QImage(frame)
                #self.newFrame.emit(frame)
                # if the frame could not be grabbed, then we have reached the end
                # of the video
                if not grabbed:
                    print("finished")
                    return(-1,-1)
                # resize the frame, convert it to grayscale, and blur it
                frame = self.frame
                #frame = frame[pt1y:pt2y,pt1x:pt2x]
                frame = imutils.resize(frame, width=500)
                height, width, channels = frame.shape
                # Draw the module-level crop rectangle (p1/p2 are normalized).
                cv2.rectangle(frame,(int(p1[0]*float(width)),int(p1[1]*float(height))),(int(p2[0]*float(width)),int(p2[1]*float(height))),(0,0,255),2)
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                gray = cv2.GaussianBlur(gray, (21, 21), 0)
                #if self.firstFrame is None:
                # NOTE(review): '== None' works but 'is None' is the idiom.
                if self.firstFrame == None:
                    # The first grabbed frame becomes the static background.
                    self.firstFrame = gray
                self.frame_count += 1
                # compute the absolute difference between the current frame and
                # first frame
                frameDelta = cv2.absdiff(self.firstFrame, gray)
                thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
                # dilate the thresholded image to fill in holes, then find contours
                # on thresholded image
                thresh = cv2.dilate(thresh, None, iterations=2)
                (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                # Counting starts only after the configured warm-up period.
                if (self.frame_count/self.fps)<self.wait:
                    count_start = False
                else:
                    count_start = True
                self.framecount.emit(self.frame_count)
                self.countSig.emit(count_start)
                # loop over the contours
                for c in cnts:
                    # if the contour is too small, ignore it
                    if cv2.contourArea(c) < self.min_area:
                        continue
                    if count_start == True:
                        # compute the bounding box for the contour, draw it on the frame,
                        # and update the text
                        (x, y, w, h) = cv2.boundingRect(c)
                        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)
                        # Draw the divide lines only when they fall inside the
                        # crop rectangle.
                        if p2[0] >= self.divide_x >= p1[0]:
                            cv2.line(frame,(int(width*self.divide_x),int(p1[1]*height)),(int(width*self.divide_x),int(p2[1]*height)),(255,0,0))
                        else:
                            print("ERROR: Center divide outside of bounding area")
                        if p2[1] >= self.divide_y >= p1[1]:
                            cv2.line(frame,(int(p1[0]*width),int(self.divide_y*height)),(int(p2[0]*width),int(self.divide_y*height)),(255,0,0))
                        else:
                            print("ERROR: Center divide outside of bounding area")
                        # Centre of the bounding box, normalized to frame size.
                        fish_x = float(x+w/2) / float(width)
                        fish_y = float(y+h/2) / float(height)
                        # if (float(pt1x)/float(width))<fish_x<(float(pt2x)/float(width)) and (float(pt1y)/float(height))<fish_y<(float(pt2y)/float(height)):
                        self.trace_xy.append((fish_x,fish_y))
                        self.newData.emit(fish_x,fish_y,self.frame_count)
                        #self.height,self.width,channel = frame.shape
                        # Draw the recorded trajectory on top of the frame.
                        for i,element in enumerate(self.trace_xy):
                            if i > 0:
                                element = (int(element[0]*width),int(element[1]*height))
                                previous_element = self.trace_xy[i-1]
                                previous_element = (int(previous_element[0]*width),int(previous_element[1]*height))
                                # Calculating euclidean distance between points:
                                dist_euclidean = np.linalg.norm(np.array(previous_element)-np.array(element))
                                self.dist.append(dist_euclidean)
                                # Keep a rolling window of the last 5 distances.
                                self.dist = self.dist[-5:]
                                #dist_mean = cv2.mean(np.array(self.dist))
                                dist_mean = np.mean(self.dist)
                                #print(self.dist_mean[0])
                                #cv2.line(frame,element,previous_element,(125, 20,200),2)
                                #text = "Occupied"
                                #if dist_euclidean < dist_mean*2:
                                #cv2.line(frame,element,previous_element,((self.frame_count),0,self.frame_count-2),2)
                                cv2.line(frame,element,previous_element,(0,255,0),2)
                                #cv2.line(frame,element,previous_element,(125,20,200),2)
                                #cv2.circle(frame,element,1,(self.frame_count,255,0),1)
                    else:
                        print("Wait:"+str("{0:.2f}".format(self.frame_count/self.fps))+" s ;"+str(self.frame_count)+" frames")
                self.lastframe = frame
                if self.show == True:
                    cv2.imshow("FlumeView - Live",frame)
                    cv2.waitKey(25)
                # (Disabled) legacy quadrant counting and overlay drawing,
                # kept for reference:
                #matplotlib
                # if fish_x < divide_x and fish_y < divide_y:
                #     channel_A += 1
                # if fish_x > divide_x and fish_y < divide_y:
                #     area_A += 1
                # if fish_x < divide_x and fish_y > divide_y:
                #     channel_B += 1
                # if fish_x > divide_x and fish_y > divide_y:
                #     area_B += 1
                # division lines
                # height, width, channels = frame.shape
                # cv2.line(frame,(0,divide_y),(width,divide_y),(255,0,0))
                # cv2.line(frame,(divide_x,0),(divide_x,height),(255,0,0))
                # tags
                # fontsize = 1
                # thickness = 1
                # cv2.putText(frame,"{0:.2f}".format(fps)+" fps",(25,25),cv2.FONT_HERSHEY_SIMPLEX,0.5,255)
                # cv2.putText(frame,"{0:.2f}".format(channel_A/fps),(divide_x-width/4,divide_y-height/4),cv2.FONT_HERSHEY_SIMPLEX,fontsize,(255,255,255),thickness)
                # cv2.putText(frame,"{0:.2f}".format(channel_B/fps),(divide_x-width/4,divide_y+height/4),cv2.FONT_HERSHEY_SIMPLEX,fontsize,(255,255,255),thickness)
                # cv2.putText(frame,"{0:.2f}".format(area_A/fps),(divide_x+width/4,divide_y-height/4),cv2.FONT_HERSHEY_SIMPLEX,fontsize,(255,255,255),thickness)
                # cv2.putText(frame,"{0:.2f}".format(area_B/fps),(divide_x+width/4,divide_y+height/4),cv2.FONT_HERSHEY_SIMPLEX,fontsize,(255,255,255),thickness)
                # cv2.putText(frame,"{0:.2f}".format(frame_count/fps)+" time (s)",(divide_x+width/4,25),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0))
                # show the frame and record if the user presses a key
                # cv2.imshow("Security Feed", frame)
                # cv2.imshow("Thresh", thresh)
                # cv2.imshow("Frame Delta", frameDelta)
                # key = cv2.waitKey(1) & 0xFF
                # if the `q` key is pressed, break from the loop
                # if key == ord("q"):
                #     break
| |
from calendar_layout import *
#from update import *
from Tkinter import *
import sqlite3
import matrices
import numpy as np
import auto
import export
import random
import datetime as datetime
import urllib2
from bs4 import BeautifulSoup
import csv
import sys
import globalvars
# Menu and Frame Functions
def doNothing():
    """Placeholder menu handler: just acknowledge the click on stdout."""
    sys.stdout.write("Ok!\n")
def doQuit():
    """Menu handler: destroy the root window, ending the application."""
    root.destroy()
def doDownloadClasses():
    """Menu action: download a term's class list and store it in the database.

    Pops up a modal dialog asking for the 4-digit term code and a save
    location (relative to ~/Documents), then delegates the download and
    cleanup to the ``update_classes`` module.  Progress is reported through
    the global status bar ``d``; the global ``root`` is the Tk main window
    (both defined elsewhere in this module).
    """
    print 'Classes Downloaded'
    import update_classes
    #import sys
    class MyDialog:
        # Small modal Toplevel with two entry fields: term code and path.
        def __init__(self, parent):
            top = self.top = Toplevel(parent)
            self.myLabel = Label(top, text='Enter 4 digit Code of Term:')
            self.myLabel.pack()
            self.myEntryBox = Entry(top)
            self.myEntryBox.pack()
            self.myLabel2 = Label(top, text='Enter Location to Save Classes: Documents/')
            self.myLabel2.pack()
            self.myEntryBox2 = Entry(top)
            self.myEntryBox2.pack()
            self.mySubmitButton = Button(top, text='Update', command=lambda: self.send())
            self.mySubmitButton.pack()
        def send(self):
            # Capture both entries, then close the dialog.
            self.value = [self.myEntryBox.get(), self.myEntryBox2.get()]
            self.top.destroy()
    def onClick():
        # Show the dialog and block until it is closed.
        inputDialog = MyDialog(root)
        root.wait_window(inputDialog.top)
        return(inputDialog.value)
    a = onClick()
    import os
    # Resolve the save path under the user's Documents folder.
    docu_path = os.path.join(os.path.expanduser("~"), "Documents")
    docu_path = docu_path + "/" + a[1]
    update_classes.update_classes(a[0], docu_path,d)
    update_classes.deleteExtraRecords(docu_path, d)
    message = "Classes for Term " + a[0] + " Downloaded!"
    d.set(message)
def doUpdateClasses():
    """Menu action: load classes from a previously saved file.

    Asks for the file path (relative to ~/Documents), imports it into the
    Classes table, then rebuilds the global section matrix.  Status is
    reported through the global status bar ``d``.
    """
    print 'Classes updated'
    import update_classes_table as uct
    class MyDialog:
        # Modal Toplevel asking only for the file location.
        def __init__(self, parent):
            top = self.top = Toplevel(parent)
            self.myLabel2 = Label(top, text='Enter Location of File: Documents/')
            self.myLabel2.pack()
            self.myEntryBox2 = Entry(top)
            self.myEntryBox2.pack()
            self.mySubmitButton = Button(top, text='Update', command=lambda: self.send())
            self.mySubmitButton.pack()
        def send(self):
            self.value = [self.myEntryBox2.get()]
            self.top.destroy()
    def onClick():
        # Show the dialog and block until it is closed.
        inputDialog = MyDialog(root)
        root.wait_window(inputDialog.top)
        return(inputDialog.value)
    filename = onClick()
    # NOTE(review): relies on 'os' being available at module scope (perhaps
    # via calendar_layout's star import) — confirm; other handlers import
    # it locally.
    docu_path = os.path.join(os.path.expanduser("~"), "Documents")
    docu_path = docu_path + "/" + filename[0]
    uct.update_classes_table(docu_path)
    message = "Analyzing Sections... Please be patient"
    d.set(message)
    global matrix_sections
    matrix_sections = matrices.matrix_sections()
    message = "Classes Updated From File"
    d.set(message)
def doUpdateClassWorth():
    """Edit the Worth column of every class through a grid dialog.

    Builds a popup with one label/entry row per class, pre-filled with the
    current worth; on submit the entries are read back by their grid
    position and written to the database.
    """
    print 'class worths updated'
    globalvars.database_path  # NOTE(review): no-op expression; the path is read just below
    conn = sqlite3.connect(globalvars.database_path)
    cur = conn.cursor()
    cur.execute('SELECT ShortName, Worth From Classes')
    classes = cur.fetchall()
    class MyDialog:
        def __init__(self, parent):
            top = self.top = Toplevel(parent)
            height = len(classes)
            width = 2
            for i in range(height): #Rows
                a = Label(top, text = classes[i][0])
                a.grid(row = i+1, column = 1)
                b = Entry(top)
                b.insert(END, classes[i][1])
                b.grid(row=i+1, column=2)
            myLabel = Label(top, text = "Update Class Worth")
            myLabel.grid(row = 0, column = 1, columnspan = 2)
            # NOTE(review): row=50 assumes fewer than 50 classes — confirm.
            mySubmitButton = Button(top, text='Update', command=lambda: self.send())
            mySubmitButton.grid(row = 50, column = 1, columnspan = 2)
        def send(self):
            def find_in_grid(frame, row, column):
                # Return the widget gridded at (row, column), or None.
                for children in frame.children.values():
                    info = children.grid_info()
                    #note that rows and column numbers are stored as string
                    if info['row'] == str(row) and info['column'] == str(column):
                        return children
                return None
            # Read the edited worth out of each row's Entry widget.
            info = []
            for i in range(len(classes)):
                info.append(find_in_grid(self.top,i+1, 2).get())
            self.value = info
            self.top.destroy()
    def onClick():
        # Show the dialog and block until it is closed.
        inputDialog = MyDialog(root)
        root.wait_window(inputDialog.top)
        return(inputDialog.value)
    worths = onClick()
    # update in database
    count = 0
    for cl in classes:
        cur.execute('UPDATE Classes SET Worth = ? WHERE ShortName = ?', (float(worths[count]), cl[0]))
        count = count + 1
    conn.commit()
    message = "Updated Class Worths!"
    d.set(message)
def doUpdateStudents():
    """Import TA survey responses and rebuild the preference matrix.

    Asks for the survey file (expected to be renamed 'students.tsv', under
    ~/Documents), loads it into the Students table, recomputes the
    student-preference matrix, and reloads it into ``globalvars.mat_prefs``.
    """
    print 'students updated'
    import update_students_table as ust
    import matrices
    import os
    import errno
    def make_sure_path_exists(path):
        # mkdir -p: tolerate the directory already existing.
        # NOTE(review): defined but never called in this function.
        try:
            os.makedirs(path)
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                raise
    class MyDialog:
        # Modal Toplevel asking for the survey file name.
        def __init__(self, parent):
            top = self.top = Toplevel(parent)
            self.myLabel = Label(top, text='Remember to rename survey response to \'students.tsv\'!')
            self.myLabel.pack()
            self.myLabel2 = Label(top, text='Enter File To Open:')
            self.myLabel2.pack()
            self.myEntryBox2 = Entry(top)
            self.myEntryBox2.pack()
            self.mySubmitButton = Button(top, text='Update', command=self.send)
            self.mySubmitButton.pack()
        def send(self):
            self.value = self.myEntryBox2.get()
            self.top.destroy()
    def onClick():
        # Show the dialog and block until it is closed.
        inputDialog = MyDialog(root)
        root.wait_window(inputDialog.top)
        return(inputDialog.value)
    filename = onClick()
    docu_path = os.path.join(os.path.expanduser("~"), "Documents")
    docu_path = docu_path + "/" + filename
    ust.update_students_table(docu_path, d)
    matrices.matrix_pref(d)
    # Reload the freshly written preference matrix for the scheduler.
    globalvars.mat_prefs = np.load(globalvars.mat_prefs_path)
    message = "Student Responses Updated!"
    d.set(message)
# List Classes in popup box for google forms survey
def doListClasses():
    """Show a read-only popup listing every class as "ShortName - Name".

    Intended as a copy/paste source when building the Google Forms survey.
    """
    print 'doListClasses'
    globalvars.database_path  # NOTE(review): no-op expression; the path is read just below
    conn = sqlite3.connect(globalvars.database_path)
    cur = conn.cursor()
    classes = cur.execute('SELECT ShortName, Name FROM Classes')
    classes = cur.fetchall()  # rebind from cursor to the fetched rows
    class MyDialog:
        # Modal popup containing a Text widget with one class per line.
        def __init__(self, parent):
            top = self.top = Toplevel(parent)
            self.myLabel = Label(top, text='List of Classes:')
            self.myLabel.pack()
            self.myframe = Frame(top)
            self.myframe.pack(fill = BOTH)
            T = Text(self.myframe)
            T.pack()
            def addtolist(item):
                # item is a (ShortName, Name) row.
                T.insert(END, item[0] + " - " + item[1] + "\n")
            for item in classes:
                addtolist(item = item)
            self.mySubmitButton = Button(top, text='Finished', command=self.send)
            self.mySubmitButton.pack()
        def send(self):
            self.top.destroy()
    def onClick():
        # Show the dialog and block until it is closed.
        inputDialog = MyDialog(root)
        root.wait_window(inputDialog.top)
        return()
    onClick()
def doListProfessors():
    """Show a read-only popup listing every professor's name."""
    print 'doListProfessors'
    globalvars.database_path  # NOTE(review): no-op expression; the path is read just below
    conn = sqlite3.connect(globalvars.database_path)
    cur = conn.cursor()
    profs = cur.execute('SELECT Name FROM Professors')
    profs = cur.fetchall()  # rebind from cursor to the fetched rows
    class MyDialog:
        # Modal popup containing a Text widget with one professor per line.
        def __init__(self, parent):
            top = self.top = Toplevel(parent)
            self.myLabel = Label(top, text='List of Professors:')
            self.myLabel.pack()
            self.myframe = Frame(top)
            self.myframe.pack(fill = BOTH)
            T = Text(self.myframe)
            T.pack()
            def addtolist(item):
                # str(item[0])[1:-1] strips the quotes from the repr'd name.
                T.insert(END, str(item[0])[1:-1] + "\n")
            for item in profs:
                addtolist(item = item)
            self.mySubmitButton = Button(top, text='Finished', command=self.send)
            self.mySubmitButton.pack()
        def send(self):
            self.top.destroy()
    def onClick():
        # Show the dialog and block until it is closed.
        inputDialog = MyDialog(root)
        root.wait_window(inputDialog.top)
        return()
    onClick()
class StatusBar(Frame):
    """One-line status bar widget, used as the app's feedback channel."""
    def __init__(self, master):
        Frame.__init__(self, master)
        self.label = Label(self, text = "Welcome!", bd=1, relief=SUNKEN, anchor=W)
        self.label.pack(fill=X)
    def _show(self, text):
        # Push *text* to the label and repaint without waiting for the loop.
        self.label.configure(text=text)
        self.label.update_idletasks()
    def set(self, format, *args):
        """Display ``format % args`` in the status line immediately."""
        self._show(format % args)
    def clear(self):
        """Empty the status line immediately."""
        self._show("")
def doByClass():
print 'doByClass'
global scheduling
scheduling = 'class'
leftListbox.delete(0,END)
chosenListbox.delete(0,END)
openListbox.delete(0,END)
globalvars.database_path
conn = sqlite3.connect(globalvars.database_path)
cur = conn.cursor()
classes = cur.execute('SELECT ShortName, Name FROM Classes')
classes = cur.fetchall()
for item in classes:
leftListbox.insert(END, item)
def doByStudent():
print 'doByStudent'
global scheduling
scheduling = 'student'
leftListbox.delete(0,END)
chosenListbox.delete(0,END)
openListbox.delete(0,END)
globalvars.database_path
conn = sqlite3.connect(globalvars.database_path)
cur = conn.cursor()
classes = cur.execute('SELECT StudentID, Name, Scheduled FROM Students')
classes = cur.fetchall()
for item in classes:
item = str(item[0]) + ": " + item[1] + " (" + str(item[2]) + ")"
leftListbox.insert(END, item)
def doAutomateFast():
print 'AutomateFast'
doSave(output2 = output)
message = "Generating TA Lines"
d.set(message)
auto.gen_sec_matrix(pop = 100, keep = 10, output = output)
auto.break_up(output)
global mat_sch
message = "Matching TAs with Lines"
d.set(message)
mat_sch = auto.gen_sec_stu_matrix(pop = 1000, keep = 1, mats = 10, output = output)[0]
global output
auto.updateDatabase(mat_sch, output)
message = "Schedule Created!"
d.set(message)
return()
def doAutomateBest():
print 'doAutomateBest'
doSave(output2 = output)
message = "Generating TA Lines"
d.set(message)
auto.gen_sec_matrix(pop = 1000, keep = 100, output = output)
auto.break_up(output)
global mat_sch
global mat_prefs
message = "Matching TAs with Lines"
d.set(message)
mat_sch = auto.gen_sec_stu_matrix(pop = 10000, keep = 1, mats = 100, output = output)[0]
auto.updateDatabase(mat_sch, output)
message = "Schedule Created!"
d.set(message)
return()
def doViewClass():
    """Draw the selected class's sections onto the calendar frame.

    Looks up every section/meeting time of the class selected in the left
    listbox, maps scheduled sections to their assigned TA's name, and
    paints each meeting with ``block_in_Calendar``.  Meetings that cannot
    be placed are collected into a textual info list instead.
    """
    print 'doViewClass'
    # Selected row is a (ShortName, Name) tuple; keep the short name.
    current = leftListbox.get(ANCHOR)[0]
    global current_class
    current_class = current
    globalvars.database_path  # NOTE(review): no-op expression; the path is read just below
    conn = sqlite3.connect(globalvars.database_path)
    cur = conn.cursor()
    # All section meeting times for this class.
    classes = cur.execute('''
    SELECT B.SectionID, B.Scheduled, B.Name, D.Day, D.Start, D.End
    FROM Classes A INNER JOIN Sections B
    ON A.ClassID = B.ClassID
    INNER JOIN Sections_Times C
    ON B.SectionID = C.SectionID
    INNER JOIN Times D
    ON C.TimeID = D.TimeID
    WHERE A.ShortName = ?''', (current,))
    classes = cur.fetchall()
    # TAs already assigned to sections of this class.
    cur.execute('''
    SELECT B.StudentID, B.Name, A.SectionID
    FROM Sections A INNER JOIN Students B
    ON A.StudentID = B.StudentID
    INNER JOIN Classes C
    ON A.ClassID = C.ClassID
    WHERE C.ShortName = ?''', (current,))
    tas = cur.fetchall()
    # Map SectionID -> assigned TA name.
    lines = dict()
    for ta in tas:
        lines[ta[2]] = ta[1]
    doCalendar(calendarFrame)
    info = []
    multiclass = dict()  # NOTE(review): never used — looks like a leftover
    for cl in classes:
        # name defaults to "0" for unscheduled sections.
        name = "0"
        if cl[1] == 1:
            name = lines[cl[0]]
        # cl[3] is the Day field; more than one character means a
        # multi-day meeting such as "MW" — draw one block per day letter.
        if len(cl[3]) > 1:
            for c in cl[3]:
                try:
                    block_in_Calendar(text = cl[2] + " (" + name + ")", open = cl[1], day = c, start = cl[4], end = cl[5], calendarFrame = calendarFrame)
                # NOTE(review): bare except hides real errors — narrow it.
                except:
                    info.append(cl[2] + ": " + name + " " + cl[4] + "-" + cl[5])
        else:
            try:
                block_in_Calendar(text = cl[2] + " (" + name + ")", open = cl[1], day = cl[3], start = cl[4], end = cl[5], calendarFrame = calendarFrame)
            except:
                continue
                #info.append(cl[2] + ": " + name + " " + cl[4] + "-" + cl[5])
    add_info_Calendar(text = info, calendarFrame = calendarFrame)
    return()
def doViewStudent(student):
    """Paint one student's constraints and assignments onto the calendar.

    Draws hard time conflicts (open=3), preferred times (open=2), and —
    when the student already holds sections — each scheduled meeting
    (open=1).  Also adds a year/division/skill summary line.
    """
    print 'doViewStudent'
    globalvars.database_path  # NOTE(review): no-op expression; the path is read just below
    conn = sqlite3.connect(globalvars.database_path)
    cur = conn.cursor()
    # immediate time conflicts
    unavail = cur.execute('''SELECT D.Day, D.Start, D.End
    FROM Con_Student_Time B INNER JOIN Times D
    ON B.TimeID = D.TimeID
    WHERE B.StudentID = ?''', (student, ))
    unavail = cur.fetchall()
    for ut in unavail:
        block_in_Calendar(text = '', open = 3, day = ut[0], start = ut[1], end = ut[2], calendarFrame = calendarFrame)
    # Preferred (but not blocked) times.
    prefer = cur.execute('''SELECT D.Day, D.Start, D.End
    FROM Pref_Student_Time B INNER JOIN Times D
    ON B.TimeID = D.TimeID
    WHERE B.StudentID = ?''', (student, ))
    prefer = cur.fetchall()
    for pt in prefer:
        block_in_Calendar(text = '', open = 2, day = pt[0], start = pt[1], end = pt[2], calendarFrame = calendarFrame)
    cur.execute('SELECT Year, Division, Skill FROM Students WHERE StudentID = ?', (student,))
    info = cur.fetchone()
    add_info_Calendar(text = "Year: " + str(info[0]) + ", Div: " + str(info[1]) + ', Skill: ' + str(info[2]), calendarFrame = calendarFrame)
    sch = cur.execute('SELECT Scheduled From Students WHERE StudentID = ?', (student,))
    sch = cur.fetchone()
    if sch[0] > 0:
        # Student already holds at least one section: draw every meeting.
        sch_classes = cur.execute('''SELECT A.Name, C.ShortName, B.Name, D.Day, D.Start, D.End
        FROM Students A INNER JOIN Sections B
        ON A.StudentID = B.StudentID
        INNER JOIN Classes C
        ON B.ClassID = C.ClassID
        INNER JOIN Sections_Times E
        ON B.SectionID = E.SectionID
        INNER JOIN Times D
        ON E.TimeID = D.TimeID
        WHERE A.StudentID = ?''', (student,))
        sch_classes = cur.fetchall()
        for cl in sch_classes:
            # cl[3] is the Day field; multiple characters mean e.g. "MW".
            if len(cl[3]) > 1:
                for c in cl[3]:
                    try:
                        block_in_Calendar(text = cl[1] + " " + cl[2], open = 1, day = c, start = cl[4], end = cl[5], calendarFrame = calendarFrame)
                    # NOTE(review): bare except hides real errors — narrow it.
                    except:
                        continue
            else:
                try:
                    block_in_Calendar(text = cl[1] + " " + cl[2], open = 1, day = cl[3], start = cl[4], end = cl[5], calendarFrame = calendarFrame)
                except:
                    continue
def leftselect(): #Select
    """Handle "Select" on the left listbox, in either scheduling mode.

    class mode:   show the class calendar and list its sections in the
                  middle box (plus the wildcard entry "any").
    student mode: show the student's calendar, list their assigned
                  sections in the middle box, and list all classes in the
                  right box for assignment.
    """
    if scheduling == 'class':
        print 'leftselect-class'
        doViewClass()
        # Selected row is a (ShortName, Name) tuple; keep the short name.
        current = leftListbox.get(ANCHOR)[0]
        global current_class
        current_class = current
        chosenListbox.delete(0, END)
        globalvars.database_path  # NOTE(review): no-op expression
        conn = sqlite3.connect(globalvars.database_path)
        cur = conn.cursor()
        # All sections and meeting times of the selected class.
        classes = cur.execute('''
        SELECT B.Scheduled, B.Name, D.Day, D.Time
        FROM Classes A INNER JOIN Sections B
        ON A.ClassID = B.ClassID
        INNER JOIN Sections_Times C
        ON B.SectionID = C.SectionID
        INNER JOIN Times D
        ON C.TimeID = D.TimeID
        WHERE A.ShortName = ?''', (current_class,))
        classes = cur.fetchall()
        # Insert sections in middle box
        chosenListbox.insert(END, "any")
        for item in classes:
            #if item[0] != 0:
            #    item[0] = 1
            chosenListbox.insert(END, item)
    if scheduling == 'student':
        print 'leftselect-student'
        doCalendar(calendarFrame)
        # Selected row is "ID: Name (Scheduled)"; split the ID back out.
        current = leftListbox.get(ANCHOR)
        global current_student
        current_student = current
        stu = current.split(":")[0]
        chosenListbox.delete(0, END)
        globalvars.database_path  # NOTE(review): no-op expression
        conn = sqlite3.connect(globalvars.database_path)
        cur = conn.cursor()
        cur.execute('''SELECT B.SectionID, C.ShortName, B.Name
        FROM Students A INNER JOIN Sections B
        ON A.StudentID = B.StudentID
        INNER JOIN Classes C
        ON B.ClassID = C.ClassID
        WHERE A.StudentID = ?''', (stu,))
        classes = cur.fetchall()
        doViewStudent(stu)
        # Insert assigned sections in middle box
        #chosenListbox.insert(END, "any")
        for item in classes:
            item = item[1] + " " + item[2]
            chosenListbox.insert(END, item)
        # Insert classes in right box
        # centerselect will select class to view sections
        openListbox.delete(0,END)
        globalvars.database_path  # NOTE(review): no-op expression
        conn = sqlite3.connect(globalvars.database_path)
        cur = conn.cursor()
        classes = cur.execute('SELECT ShortName, Name FROM Classes')
        classes = cur.fetchall()
        for item in classes:
            openListbox.insert(END, item)
        # get students available for current section
        # list in order of mat_prefs value
def centerselect(): #View
    """Handle the "View" button.

    Class mode: remember the section anchored in ``chosenListbox`` as
    ``current_section`` and fill ``openListbox`` with candidate students,
    preference-sorted; partially scheduled students come first, then an
    "undergrad" marker, then fully scheduled ones.
    Student mode: fill ``openListbox`` with the sections of the class
    anchored there.
    """
    if scheduling == 'class':
        print 'centerselect-class'
        current = chosenListbox.get(ANCHOR)
        global current_section
        globalvars.database_path  # NOTE(review): no-op expression statement
        conn = sqlite3.connect(globalvars.database_path)
        cur = conn.cursor()
        if current == "any":
            current_section = "any"
        else:
            # Section rows are tuples; [1] is the section Name column.
            current_section = current[1]
        cla = cur.execute('SELECT ClassID FROM Classes WHERE ShortName = ?',(current_class,))
        cla = cur.fetchone()[0]
        # NOTE(review): when current_section == "any" this lookup matches
        # no row, so fetchone() is None and [0] below would raise --
        # presumably a section literally named "any" exists, or this path
        # is never hit with "any"; verify.
        sec1 = cur.execute('SELECT SectionID FROM Sections WHERE ClassID = ? and Name = ?', (cla, current_section))
        global sec
        sec = cur.fetchone()[0]
        students = cur.execute('SELECT DISTINCT Name, StudentID, Scheduled FROM Students WHERE Scheduled < 0.9') # ie less than fully scheduled
        students = cur.fetchall()
        global mat_prefs
        student_tuples = []
        # Build (pref, (StudentID, Name, Scheduled), i) triples; pref is
        # the mat_prefs cell for the chosen section, or 0 for "any".
        for i in range(len(students)):
            if current_section != "any":
                colnum = section_index[sec]
                stu = students[i][1]
                stuID = student_index[stu]
                student_tuples.append((int(globalvars.mat_prefs[stuID,colnum]),(students[i][1],students[i][0], students[i][2]),i)) # update from i to stuindex
            else:
                student_tuples.append((int(0),(students[i][1], students[i][0], students[i][2]),i))
        # sort p in order of highest value first
        openListbox.delete(0,END)
        if current_section != "any":
            student_tuples = sorted(student_tuples, key = lambda student:student[0], reverse = True)
        for item in student_tuples:
            # Display as "pref; Name (scheduled-load)".
            item = str(item[0]) + "; " + str(item[1][1]) + " (" + str(item[1][2]) + ")"
            openListbox.insert(END, item)
        openListbox.insert(END, "undergrad")
        # Second pass: fully scheduled students, listed below the marker.
        students = cur.execute('SELECT DISTINCT Name, StudentID, Scheduled FROM Students WHERE Scheduled > 0.9') # ie fully scheduled
        students = cur.fetchall()
        global mat_prefs
        student_tuples = []
        for i in range(len(students)):
            if current_section != "any":
                colnum = section_index[sec]
                stu = students[i][1]
                stuID = student_index[stu]
                student_tuples.append((int(globalvars.mat_prefs[stuID,colnum]),(students[i][1],students[i][0], students[i][2]),i)) # update from i to stuindex
            else:
                student_tuples.append((int(0),(students[i][1], students[i][0], students[i][2]),i))
        # sort p in order of highest value first
        if current_section != "any":
            student_tuples = sorted(student_tuples, key = lambda student:student[0], reverse = True)
        for item in student_tuples:
            item = str(item[0]) + "; " + str(item[1][1]) + " (" + str(item[1][2]) + ")"
            openListbox.insert(END, item)
    if scheduling == 'student':
        print 'centerselect-student'
        # Rows were inserted as (ShortName, Name) tuples; [0] is ShortName.
        current = openListbox.get(ANCHOR)[0]
        print current
        global current_class
        current_class = current
        openListbox.delete(0, END)
        globalvars.database_path  # NOTE(review): no-op expression statement
        conn = sqlite3.connect(globalvars.database_path)
        cur = conn.cursor()
        # All sections (with meeting day/time) of the selected class.
        classes = cur.execute('''
        SELECT B.Scheduled, B.Name, D.Day, D.Time
        FROM Classes A INNER JOIN Sections B
        ON A.ClassID = B.ClassID
        INNER JOIN Sections_Times C
        ON B.SectionID = C.SectionID
        INNER JOIN Times D
        ON C.TimeID = D.TimeID
        WHERE A.ShortName = ?''', (current_class,))
        classes = cur.fetchall()
        # Insert sections in middle box
        openListbox.insert(END, "any")
        for item in classes:
            #if item[0] != 0:
            #    item[0] = 1
            openListbox.insert(END, item)
def get_class_value(SectionID):
    """Return the scheduling weight (``Worth``) of one section.

    Parameters
    ----------
    SectionID : the ``Sections`` primary key to look up.

    Returns
    -------
    The ``Worth`` column value for that section, as stored in sqlite.
    """
    conn = sqlite3.connect(globalvars.database_path)
    try:
        cur = conn.cursor()
        cur.execute('''SELECT Worth FROM Sections WHERE SectionID = ?''',
                    (SectionID,))
        cla = cur.fetchone()[0]
    finally:
        # The original leaked one connection per call; close explicitly.
        conn.close()
    return cla
def openaddselect(): #Schedule
    """Handle the "Schedule" button: assign a student to a class.

    Class mode: the anchored ``openListbox`` row ("pref; Name (sched)")
    names the student; ``current_section`` (set earlier by centerselect)
    names the section, or "any".  Student mode: ``current_student`` is
    the student and the anchored row picks the section or "any".
    Updates the manual-assignment matrices (mat_yes/mat_add/mat_no),
    adds the section worth to the student's Scheduled load, and
    refreshes the listboxes.
    """
    # schedule student to class
    if scheduling == 'class':
        print 'add-class'
        # Row format is "pref; Name (scheduled)"; extract the Name part.
        current = openListbox.get(ANCHOR)
        current = current.split("; ")[1]
        current = current.split(" (")[0]
        sqlite3.connect(globalvars.database_path)  # NOTE(review): stray connection, never used or closed
        conn = sqlite3.connect(globalvars.database_path)
        cur = conn.cursor()
        stu = cur.execute('SELECT StudentID FROM Students WHERE Name = ?',(current,))
        stu = cur.fetchone()[0]
        global student_index
        stuID = student_index[stu]
        # byClass add student to specific section of class
        if current_section != "any":
            cur.execute('''SELECT A.SectionID FROM Sections A INNER JOIN
                        Classes B ON A.ClassID = B.ClassID
                        WHERE B.ShortName = ? and A.Name = ?
                        ''', (current_class,current_section))
            sec = cur.fetchone()[0]
            global mat_yes
            global mat_no
            global section_index
            secID = section_index[sec] # what's sec from
            # Mark as manually scheduled and clear any block flag.
            mat_yes[stuID, secID] = 1
            mat_no[stuID, secID] = 0
            addPrefForClass(stu)
            # Add the section worth onto the student's Scheduled load.
            value = get_class_value(sec)
            oldvalue = cur.execute('SELECT Scheduled FROM Students WHERE StudentID = ?',(stu, ) )
            oldvalue = cur.fetchone()[0]
            c = cur.execute('UPDATE Students SET Scheduled = ? WHERE StudentID = ?',(float(value) + float(oldvalue), stu) )
            c = cur.execute('UPDATE Sections SET Scheduled = ? WHERE SectionID = ?' , (1, sec))
            c = cur.execute('UPDATE Sections SET StudentID = ? WHERE SectionID = ?', (stu, sec)) # wonder why this doesn't work with an AND statement
            conn.commit()
        # byClass add student to any section of class
        if current_section == "any":
            print 'any'
            addPrefForClass(stu)
            global mat_yes
            global mat_add
            global mat_no
            cur.execute('SELECT A.SectionID FROM Sections A INNER JOIN Classes B ON A.ClassID = B.ClassID WHERE B.ShortName = ?',(current_class,))
            secs = cur.fetchall()
            global section_index
            # Flag every section of the class as eligible ("add to any").
            for sec in secs:
                secID = section_index[sec[0]]
                mat_yes[stuID,secID] = 0
                mat_add[stuID,secID] = 1
                mat_no[stuID,secID] = 0
            print secs[0][0]
            # Uses the first section's worth -- presumably all sections of
            # one class share the same Worth; verify.
            value = get_class_value(secs[0][0])
            oldvalue = cur.execute('SELECT Scheduled FROM Students WHERE StudentID = ?',(stu, ) )
            oldvalue = cur.fetchone()[0]
            c = cur.execute('UPDATE Students SET Scheduled = ? WHERE StudentID = ?',(float(oldvalue) + float(value), stu) )
            conn.commit()
        doViewClass()
        message = "Student Added to Class!"
        centerselect()
        d.set(message)
    if scheduling == 'student':
        print 'add-student'
        current = openListbox.get(ANCHOR)
        global current_section
        globalvars.database_path  # NOTE(review): no-op expression statement
        conn = sqlite3.connect(globalvars.database_path)
        cur = conn.cursor()
        if current == "any":
            current_section = "any"
        else:
            # Section rows are tuples; [1] is the section Name column.
            current_section = current[1]
        stu = current_student.split(":")[0]
        stu = int(stu)
        global student_index
        stuID = student_index[stu]
        # byStudent add student to single section of class
        if current_section != "any":
            cur.execute('''SELECT A.SectionID FROM Sections A INNER JOIN
                        Classes B ON A.ClassID = B.ClassID
                        WHERE B.ShortName = ? and A.Name = ?
                        ''', (current_class,current_section))
            sec = cur.fetchone()[0]
            # mats
            global mat_yes
            global mat_no
            secID = section_index[sec] # what's sec from
            mat_yes[stuID, secID] = 1
            mat_no[stuID, secID] = 0
            addPrefForClass(stu)
            # database
            value = get_class_value(sec)
            oldvalue = cur.execute('''SELECT Scheduled FROM Students
                WHERE StudentID = ?''',(stu, ) )
            oldvalue = cur.fetchone()[0]
            c = cur.execute('''UPDATE Students SET Scheduled = ?
                WHERE StudentID = ?''',(float(value) + float(oldvalue), stu) )
            c = cur.execute('''UPDATE Sections SET Scheduled = ?
                WHERE SectionID = ?''' , (1, sec))
            c = cur.execute('''UPDATE Sections SET StudentID = ?
                WHERE SectionID = ?''', (stu, sec))
            # wonder why this doesn't work with an AND statement
            # Insert assigned sections in middle box
            #chosenListbox.insert(END, "any")
            cur.execute('''SELECT B.SectionID, C.ShortName, B.Name
            FROM Students A INNER JOIN Sections B
            ON A.StudentID = B.StudentID
            INNER JOIN Classes C
            ON B.ClassID = C.ClassID
            WHERE A.StudentID = ?''', (stu,))
            classes = cur.fetchall()
            chosenListbox.delete(0, END)
            for item in classes:
                item = item[1] + " " + item[2]
                chosenListbox.insert(END, item)
            conn.commit()
        #byStudent add student to any section of class
        if current_section == "any":
            print 'any'
            # add in mats
            addPrefForClass(stu)
            global mat_yes
            global mat_add
            global mat_no
            cur.execute('''SELECT A.SectionID FROM Sections A
                INNER JOIN Classes B
                ON A.ClassID = B.ClassID
                WHERE B.ShortName = ?''',(current_class,))
            secs = cur.fetchall()
            global section_index
            for sec in secs:
                secID = section_index[sec[0]]
                mat_yes[stuID,secID] = 0
                mat_add[stuID,secID] = 1
                mat_no[stuID,secID] = 0
            # add in database
            value = get_class_value(secs[0][0])
            oldvalue = cur.execute('''SELECT Scheduled FROM Students
                WHERE StudentID = ?''',(stu, ) )
            oldvalue = cur.fetchone()[0]
            c = cur.execute('''UPDATE Students SET Scheduled = ?
                WHERE StudentID = ?''',(float(oldvalue) + float(value), stu) )
            conn.commit()
            chosenListbox.insert(END, 'any ' + current_class)
        doViewStudent(stu)
        message = "Student Added to Class!"
        d.set(message)
def openremoveselect(): #Remove
# remove student from class
if scheduling == 'class':
print 'remove-class'
current = openListbox.get(ANCHOR)
current = current.split("; ")[1]
current = current.split(" (")
globalvars.database_path
conn = sqlite3.connect(globalvars.database_path)
cur = conn.cursor()
stu = cur.execute('SELECT StudentID FROM Students WHERE Name = ?',(current[0],))
stu = cur.fetchone()[0]
global student_index
stuID = student_index[stu]
# byClass remove student from single section
if current_section != "any":
cur.execute('''SELECT A.SectionID FROM Sections A INNER JOIN
Classes B ON A.ClassID = B.ClassID
WHERE B.ShortName = ? and A.Name = ?
''', (current_class,current_section))
sec = cur.fetchone()[0]
print sec, stu
# mats
global mat_yes
global section_index
secID = section_index[sec]
mat_yes[stuID, secID] = 0
# remove from database
value = get_class_value(sec)
oldvalue = cur.execute('SELECT Scheduled FROM Students WHERE StudentID = ?',(stu, ) )
oldvalue = cur.fetchone()[0]
print value, oldvalue
cur.execute('''UPDATE Students SET Scheduled = ?
WHERE StudentID = ?''',(oldvalue - value, stu) )
cur.execute('''Update Sections SET Scheduled = ?
WHERE SectionID = ?''' , (0, sec))
cur.execute('''Update Sections SET StudentID = ?
WHERE SectionID = ?''' , (stu, sec))
conn.commit()
# byClass remove student from any section
if current_section == "any":
print 'any'
# remove scheduled from mats
global mat_yes
global mat_add
cur.execute('''SELECT A.SectionID FROM Sections A
INNER JOIN Classes B
ON A.ClassID = B.ClassID
WHERE B.ShortName = ?''',(current_class,))
secs = cur.fetchall()
global section_index
for sec in secs:
secID = section_index[sec[0]]
mat_yes[stuID,secID] = 0
mat_add[stuID,secID] = 0
# remove scheduled from database
value = get_class_value(secs[0][0])
oldvalue = cur.execute('''SELECT Scheduled FROM Students
WHERE StudentID = ?''',(stu, ) )
oldvalue = cur.fetchone()[0]
c = cur.execute('''Update Students SET Scheduled = ?
WHERE StudentID = ?''',(float(oldvalue) - float(value), stu) )
conn.commit()
doViewClass()
message = "Student Removed from Class!"
d.set(message)
if scheduling == 'student':
print 'remove-student'
current = openListbox.get(ANCHOR)
global current_section
globalvars.database_path
conn = sqlite3.connect(globalvars.database_path)
cur = conn.cursor()
if current == "any":
current_section = "any"
else:
current_section = current[1]
stu = current_student.split(":")[0]
stu = int(stu)
global student_index
stuID = student_index[stu]
# byStudent remove student from single section of class
if current_section != "any":
cur.execute('''SELECT A.SectionID FROM Sections A INNER JOIN
Classes B ON A.ClassID = B.ClassID
WHERE B.ShortName = ? and A.Name = ?
''', (current_class,current_section))
sec = cur.fetchone()[0]
# mats
global mat_yes
global mat_no
global section_index
secID = section_index[sec] # what's sec from
mat_yes[stuID, secID] = 1
mat_no[stuID, secID] = 0
addPrefForClass(stu)
# remove from database
value = get_class_value(sec)
oldvalue = cur.execute('SELECT Scheduled FROM Students WHERE StudentID = ?',(stu, ) )
oldvalue = cur.fetchone()[0]
c = cur.execute('UPDATE Students SET Scheduled = ? WHERE StudentID = ?',(float(value) - float(oldvalue), stu) )
c = cur.execute('UPDATE Sections SET Scheduled = ? WHERE SectionID = ?' , (0, sec))
c = cur.execute('UPDATE Sections SET StudentID = ? WHERE SectionID = ?', (0, sec)) # wonder why this doesn't work with an AND statement
# Insert assigned sections in middle box
#chosenListbox.insert(END, "any")
cur.execute('''SELECT B.SectionID, C.ShortName, B.Name
FROM Students A INNER JOIN Sections B
ON A.StudentID = B.StudentID
INNER JOIN Classes C
ON B.ClassID = C.ClassID
WHERE A.StudentID = ?''', (stu,))
classes = cur.fetchall()
chosenListbox.delete(0, END)
for item in classes:
item = item[1] + " " + item[2]
chosenListbox.insert(END, item)
conn.commit()
# byStudent remove from any section
if current_section == "any":
print 'any'
global current_class
removePrefForClass(stu)
# mats
global mat_yes
global mat_add
global mat_no
cur.execute('SELECT A.SectionID FROM Sections A INNER JOIN Classes B ON A.ClassID = B.ClassID WHERE B.ShortName = ?',(current_class,))
secs = cur.fetchall()
global section_index
for sec in secs:
secID = section_index[sec[0]]
mat_yes[stuID,secID] = 0
mat_add[stuID,secID] = 1
mat_no[stuID,secID] = 0
# remove from database
value = get_class_value(secs[0][0])
oldvalue = cur.execute('SELECT Scheduled FROM Students WHERE StudentID = ?',(stu, ) )
oldvalue = cur.fetchone()[0]
c = cur.execute('UPDATE Students SET Scheduled = ? WHERE StudentID = ?',(float(oldvalue) - float(value), stu) )
conn.commit()
items = chosenListbox.get(0,END)
a = items.index('any ' + current_class)
chosenListbox.delete(a)
doViewStudent(stu)
message = "Student Removed from Class!"
d.set(message)
def openaddblockselect(): #Add Block
    """Handle the "Add Block" button: forbid a student/section pairing.

    Sets ``mat_no`` (and clears ``mat_yes``) for the chosen section --
    or for every section of the class when "any" is selected -- so the
    automatic scheduler will never place the student there.  Only the
    matrices change; Scheduled values in the database are untouched.
    """
    # block student from class
    if scheduling == 'class':
        print 'block-class'
        # Row format is "pref; Name (scheduled)"; extract the Name part.
        current = openListbox.get(ANCHOR)
        current = current.split("; ")[1]
        current = current.split(" (")
        globalvars.database_path  # NOTE(review): no-op expression statement
        conn = sqlite3.connect(globalvars.database_path)
        cur = conn.cursor()
        stu = cur.execute('SELECT StudentID FROM Students WHERE Name = ?',(current[0],))
        stu = cur.fetchone()[0]
        global student_index
        stuID = student_index[stu]
        # byClass block student from single section of class
        if current_section != "any":
            cur.execute('''SELECT A.SectionID FROM Sections A INNER JOIN
                        Classes B ON A.ClassID = B.ClassID
                        WHERE B.ShortName = ? and A.Name = ?
                        ''', (current_class,current_section))
            sec = cur.fetchone()[0]
            global mat_yes
            global mat_no
            global section_index
            secID = section_index[sec]
            mat_yes[stuID, secID] = 0
            mat_no[stuID, secID] = 1
        # byClass block of student from any section in class
        if current_section == 'any':
            print 'any'
            removePrefForClass(stu)
            cur.execute('SELECT ClassID FROM Classes WHERE ShortName = ?', (current_class,))
            ClassID = cur.fetchone()[0]
            cur.execute('SELECT SectionID FROM Sections WHERE ClassID = ?'
                        , (ClassID,))
            secs = cur.fetchall()
            for sec in secs:
                secID = section_index[sec[0]]
                mat_yes[stuID,secID] = 0
                mat_no[stuID,secID] = 1
        conn.commit()
    # byStudent block from class
    if scheduling == 'student':
        print 'block-student'
        current = openListbox.get(ANCHOR)
        global current_section
        globalvars.database_path  # NOTE(review): no-op expression statement
        conn = sqlite3.connect(globalvars.database_path)
        cur = conn.cursor()
        if current == "any":
            current_section = "any"
        else:
            # Section rows are tuples; [1] is the section Name column.
            current_section = current[1]
        stu = current_student.split(":")[0]
        stu = int(stu)
        global student_index
        stuID = student_index[stu] #where stu from
        # byStudent block from single section of class
        if current_section != 'any':
            global mat_yes
            global mat_no
            global section_index
            cur.execute('SELECT ClassID FROM Classes WHERE ShortName = ?',(current_class,))
            cla = cur.fetchone()[0]
            cur.execute('SELECT SectionID FROM Sections WHERE ClassID = ? and Name = ?', (cla, current_section))
            sec = cur.fetchone()[0]
            secID = section_index[sec]
            mat_yes[stuID,secID] = 0
            mat_no[stuID,secID] = 1
        #by Student block of any section of class
        if current_section == 'any':
            print 'any'
            global mat_yes
            global mat_no
            global section_index
            cur.execute('SELECT ClassID FROM Classes WHERE ShortName = ?', (current_class,))
            cla = cur.fetchone()[0]
            cur.execute('SELECT SectionID FROM Sections WHERE ClassID = ?'
                        , (cla,))
            secs = cur.fetchall()
            for sec in secs:
                secID = section_index[sec[0]]
                mat_yes[stuID,secID] = 0
                mat_no[stuID,secID] = 1
            removePrefForClass(stu)
    message = "Student Blocked From Class!"
    d.set(message)
def openremoveblockselect(): #Remove Block
# remove block from student from class
if scheduling == 'class':
print 'rmblock-class'
current = openListbox.get(ANCHOR)
current = current.split("; ")[1]
current = current.split(" (")
globalvars.database_path
conn = sqlite3.connect(globalvars.database_path)
cur = conn.cursor()
stu = cur.execute('SELECT StudentID FROM Students WHERE Name = ?',(current[0],))
stu = cur.fetchone()[0]
global student_index
stuID = student_index[stu]
# byClass remove block from single section of class
if current_section != 'any':
cur.execute('''SELECT A.SectionID FROM Sections A
INNER JOIN Classes B ON A.ClassID = B.ClassID
WHERE B.ShortName = ? and A.Name = ?
''', (current_class,current_section))
sec = cur.fetchone()
global mat_no
global section_index
secID = section_index[sec]
mat_no[stuID, secID] = 0
# byClass remove block from any section of class
if current_section == 'any':
print 'any'
addPrefForClass(stu)
cur.execute('SELECT ClassID FROM Classes WHERE ShortName = ?', (current_class,))
ClassID = cur.fetchone()[0]
cur.execute('SELECT SectionID FROM Sections WHERE ClassID = ?'
, (ClassID,))
secs = cur.fetchall()
for sec in secs:
secID = section_index[sec[0]]
mat_yes[stuID,secID] = 0
mat_no[stuID,secID] = 0
conn.commit()
if scheduling == 'student':
print 'rmblock-student'
current = openListbox.get(ANCHOR)
global current_section
globalvars.database_path
conn = sqlite3.connect(globalvars.database_path)
cur = conn.cursor()
if current == "any":
current_section = "any"
print 'any-not yet supported'
else:
current_section = current[1]
stu = current_student.split(":")[0]
stu = int(stu)
global student_index
stuID = student_index[stu]
# byStudent remove block from single section of class
if current_section != 'any':
global mat_yes
global mat_no
global section_index
cur.execute('SELECT ClassID FROM Classes WHERE ShortName = ?',(current_class,))
cla = cur.fetchone()[0]
cur.execute('SELECT SectionID FROM Sections WHERE ClassID = ? and Name = ?', (cla, current_section))
sec = cur.fetchone()[0]
secID = section_index[sec[0]]
mat_yes[stuID,secID] = 0
mat_no[stuID,secID] = 0
# byStudent remove block from any section of class
if current_section == 'any':
global mat_add
global section_index
cur.execute('SELECT ClassID FROM Classes WHERE ShortName = ?', (current_class,))
cla = cur.fetchone()[0]
# get sections of class
cur.execute('SELECT SectionID FROM Sections WHERE ClassID = ?', (cla, ))
secs = cur.fetchall()
global mat_add
global section_index
for sec in secs:
secID = section_index[sec[0]]
mat_yes[stuID, secID] = 0
mat_no[stuID, secID] = 0
mat_add[stuID,secID] = 0
addPrefForClass(stu)
message = "Student Block Removed!"
d.set(message)
def addPrefForClass(student):
    """Boost *student*'s preference for every section of current_class.

    Adds 10000 to each affected ``globalvars.mat_prefs`` cell so the
    automated scheduler strongly favors keeping this manual assignment.
    ``removePrefForClass`` is the exact inverse.
    """
    conn = sqlite3.connect(globalvars.database_path)
    try:
        cur = conn.cursor()
        cur.execute('''SELECT A.SectionID FROM Classes B
                    INNER JOIN Sections A
                    ON A.ClassID = B.ClassID
                    WHERE B.ShortName = ?''', (current_class,))
        secs = cur.fetchall()
    finally:
        # The original leaked one connection per call; close explicitly.
        conn.close()
    stuID = student_index[student]
    for s in secs:
        secID = section_index[s[0]]
        cpref = globalvars.mat_prefs[stuID,secID]
        globalvars.mat_prefs[stuID,secID] = int(cpref) + 10000
def removePrefForClass(student):
    """Undo the preference boost for every section of current_class.

    Subtracts 10000 from each affected ``globalvars.mat_prefs`` cell;
    exact inverse of ``addPrefForClass``.
    """
    conn = sqlite3.connect(globalvars.database_path)
    try:
        cur = conn.cursor()
        cur.execute('''SELECT A.SectionID FROM Classes B
                    INNER JOIN Sections A
                    ON A.ClassID = B.ClassID
                    WHERE B.ShortName = ?''', (current_class,))
        secs = cur.fetchall()
    finally:
        # The original leaked one connection per call; close explicitly.
        conn.close()
    stuID = student_index[student]
    for s in secs:
        secID = section_index[s[0]]
        cpref = globalvars.mat_prefs[stuID,secID]
        globalvars.mat_prefs[stuID,secID] = int(cpref) - 10000
# File functions
def doNewSchedule():
print 'doNewSchedule'
import numpy as np
import globalvars
message = "Starting a New Schedule"
d.set(message)
# Data
try:
# open from file
globalvars.mat_prefs = np.load(globalvars.mat_prefs_path)
except:
# generate if unable to open
message = "Generating Missing Files"
d.set(message)
globalvars.mat_prefs = matrices.matrix_pref(d)
global section_index
section_index = matrices.section_index()
global student_index
student_index = matrices.student_index()
global mat_yes
mat_yes = matrices.matrix_schedule_manual()
global mat_add
mat_add = matrices.matrix_schedule_manual()
global mat_no
mat_no = matrices.matrix_schedule_manual()
try:
globalvars.matrix_sections = np.load(globalvars.sec_sec_matrix_path)
except:
try:
message = "Generating Missing Files"
d.set(message)
globalvars.matrix_sections = matrices.matrix_sections()
except:
globalvars.matrix_sections = np.zeros((100,100))
globalvars.matrix_sections.flags.writeable = True
global output
output = "output"
global scheduling
global mat_sch
global current_class
global current_student
global current_section
global sec
# update database
globalvars.database_path
conn = sqlite3.connect(globalvars.database_path)
cur = conn.cursor()
cur.execute('UPDATE Sections SET Scheduled = 0')
cur.execute('UPDATE Sections SET StudentID = 0')
cur.execute('UPDATE Students SET Scheduled = 0')
conn.commit()
message = "New Schedule"
d.set(message)
def doOpenSchedule(output2 = None):
    """Load a previously saved schedule.

    When *output2* is None a small Tk dialog prompts for the directory
    name; otherwise *output2* is used directly.  The saved ``.npy``
    matrices are loaded into the global state and the index maps are
    rebuilt from the database.
    """
    print 'doOpenSchedule'
    import os
    import errno
    def make_sure_path_exists(path):
        # mkdir -p semantics: only "already exists" is ignored.
        try:
            os.makedirs(path)
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                raise
    class MyDialog:
        # Minimal modal prompt: one entry box plus an Open button.
        def __init__(self, parent):
            top = self.top = Toplevel(parent)
            self.myLabel = Label(top, text='Enter Name To Open:')
            self.myLabel.pack()
            self.myEntryBox = Entry(top)
            self.myEntryBox.pack()
            self.mySubmitButton = Button(top, text='Open', command=self.send)
            self.mySubmitButton.pack()
        def send(self):
            self.value = self.myEntryBox.get()
            self.top.destroy()
    def onClick():
        # Block until the dialog is closed, then return the typed name.
        inputDialog = MyDialog(root)
        root.wait_window(inputDialog.top)
        return(inputDialog.value)
    if output2 is None:
        global output
        output = onClick()
    else:
        global output
        output = output2
    make_sure_path_exists(output)
    global mat_yes
    mat_yes = np.load(output + "/mat_yes.npy")
    global mat_add
    mat_add = np.load(output + "/mat_add.npy")
    global mat_no
    mat_no = np.load(output + "/mat_no.npy")
    globalvars.matrix_sections = np.load(output + "/matrix_sections.npy")
    globalvars.mat_prefs = np.load(output + "/mat_prefs.npy") # matrices.matrix_pref()
    global section_index
    section_index = matrices.section_index()
    global student_index
    student_index = matrices.student_index()
    message = "Openned Schedule from " + output
    d.set(message)
def doSave(output2):
print 'doSave'
np.save(output2 + "/mat_yes.npy", mat_yes)
np.save(output2 + "/mat_add.npy", mat_add)
np.save(output2 + "/mat_no.npy", mat_no)
np.save(output2 + "/matrix_sections.npy", globalvars.matrix_sections)
np.save(output2 + "/mat_prefs.npy", globalvars.mat_prefs)
message = "Schedule Saved"
d.set(message)
def doSaveAs():
    """Prompt for an output directory name and save all matrices there.

    Also rebinds the module-global ``output`` so subsequent "Save
    Schedule" and export actions reuse the chosen directory.
    """
    print 'doSaveAs'
    import os
    import errno
    def make_sure_path_exists(path):
        # mkdir -p semantics: only "already exists" is ignored.
        try:
            os.makedirs(path)
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                raise
    class MyDialog:
        # Minimal modal prompt: one entry box plus a Save button.
        def __init__(self, parent):
            top = self.top = Toplevel(parent)
            self.myLabel = Label(top, text='Enter Name To Save Output:')
            self.myLabel.pack()
            self.myEntryBox = Entry(top)
            self.myEntryBox.pack()
            self.mySubmitButton = Button(top, text='Save', command=self.send)
            self.mySubmitButton.pack()
        def send(self):
            self.value = self.myEntryBox.get()
            self.top.destroy()
    def onClick():
        # Block until the dialog is closed, then return the typed name.
        inputDialog = MyDialog(root)
        root.wait_window(inputDialog.top)
        return(inputDialog.value)
    global output
    output = onClick()
    make_sure_path_exists(output)
    #np.save(output + "/automats", automats)
    np.save(output + "/mat_yes.npy",mat_yes)
    np.save(output + "/mat_add.npy", mat_add)
    np.save(output + "/mat_no.npy", mat_no)
    np.save(output + "/mat_prefs.npy", globalvars.mat_prefs)
    np.save(output + "/matrix_sections.npy", globalvars.matrix_sections)
    message = "Schedule Saved"
    d.set(message)
# Main loop
root = Tk()
# Layout Frames
navFrame = Frame(root)
statusFrame = Frame(root)
calendarFrame = Frame(root)
navFrame.pack(side = TOP)
statusFrame.pack(side = BOTTOM, fill = X)
calendarFrame.pack(side = BOTTOM, fill = BOTH)
# NOTE(review): doCalendar runs before the globalvars.*_path assignments
# further below -- presumably globalvars already carries defaults at
# import time; verify.
doCalendar(calendarFrame)
d = StatusBar(statusFrame)
d.pack(side = LEFT)
# Three listboxes left-to-right: pick -> chosen -> open/candidates,
# each followed by its button column.
leftListbox = Listbox(navFrame)
leftListbox.pack(side = LEFT)
buttonSelectFrame = Frame(navFrame)
buttonSelectFrame.pack(side = LEFT)
bselect = Button(buttonSelectFrame, text="Select", command=lambda : leftselect()) # lambda necessary to prevent call upon opening
bselect.pack(side = TOP)
chosenListbox = Listbox(navFrame)
chosenListbox.pack(side = LEFT)
buttonFrame = Frame(navFrame)
buttonFrame.pack(side = LEFT)
bAdd = Button(buttonFrame, text="View", command=lambda : centerselect())
bAdd.pack(side = TOP)
openListbox = Listbox(navFrame)
openListbox.pack(side = LEFT)
buttonFrame2 = Frame(navFrame)
buttonFrame2.pack(side = LEFT)
bRemove = Button(buttonFrame2, text="Schedule", command=lambda : openaddselect())
bRemove.pack(side = TOP)
b2Remove = Button(buttonFrame2, text="Remove", command= lambda : openremoveselect())
b2Remove.pack(side = TOP)
b3Remove = Button(buttonFrame2, text="Add Block", command= lambda : openaddblockselect())
b3Remove.pack(side = TOP)
b4Remove = Button(buttonFrame2, text="Remove Block", command= lambda : openremoveblockselect())
b4Remove.pack(side = TOP)
# Menu Bar
menu = Menu(root)
root.config(menu = menu)
filemenu = Menu(menu)
updatemenu = Menu(menu)
schmenu = Menu(menu)
googlemenu = Menu(menu)
menu.add_cascade(label = "File", menu = filemenu)
menu.add_cascade(label = "Schedule", menu = schmenu)
menu.add_cascade(label = "Update", menu = updatemenu)
menu.add_cascade(label = "Google Survey", menu = googlemenu)
##File Menu
filemenu.add_command(label = "New Schedule", command=lambda : doNewSchedule())
filemenu.add_command(label = "Open Schedule", command = lambda: doOpenSchedule())
filemenu.add_separator()
filemenu.add_command(label = "Save Schedule", command = lambda : doSave(output))
filemenu.add_command(label = "Save Schedule As", command = lambda : doSaveAs())
filemenu.add_separator()
filemenu.add_command(label = "Export Email", command = lambda: export.doExportMail(output2 = output, d = d))
filemenu.add_command(label = "Export Susan", command = lambda: export.doExportSusan(output2 = output, d = d))
filemenu.add_command(label = "Export Linda", command = lambda: export.doExportLinda(output2 = output, d = d))
filemenu.add_command(label = "Export All", command = lambda: export.doExportAll(output2 = output, d = d))
filemenu.add_separator()
filemenu.add_command(label = "Exit", command = doQuit)
## Schedule Menu
schmenu.add_command(label = "By Class", command = lambda : doByClass())
schmenu.add_command(label = "By Student", command = lambda : doByStudent())
schmenu.add_separator()
schmenu.add_command(label = "Automate (Fast)", command = lambda : doAutomateFast())
# NOTE(review): "Automate (Best)" also invokes doAutomateFast -- looks
# like it should call a doAutomateBest; confirm intent.
schmenu.add_command(label = "Automate (Best)", command = lambda : doAutomateFast())
## Update Menu
updatemenu.add_command(label = "Download Classes", command = lambda : doDownloadClasses())
updatemenu.add_command(label = "Update Class Worths", command = lambda : doUpdateClassWorth())
updatemenu.add_command(label = "Update Classes", command = lambda : doUpdateClasses())
updatemenu.add_command(label = "Update Students", command = lambda: doUpdateStudents())
## Google Survey Menu
googlemenu.add_command(label = "List Classes", command = lambda : doListClasses())
googlemenu.add_command(label = "List Professors", command = lambda : doListProfessors())
# Run at startup
#doNewSchedule()
import os
# Per-user application data directory: APPDATA on Windows, a dotfolder
# under HOME elsewhere.
try:
    dir_path = os.path.join(os.environ['APPDATA'], 'TAScheduling')
except KeyError:
    dir_path = os.path.join(os.environ['HOME'], '.TAScheduling')
if not os.path.exists(dir_path):
    os.makedirs(dir_path)
globalvars.database_path = os.path.join(dir_path, 'tascheduling.db')
globalvars.mat_prefs_path = os.path.join(dir_path, 'student_preferences.npy')
globalvars.sec_sec_matrix_path = os.path.join(dir_path, 'section_section_matrix.npy')
globalvars.para_path = os.path.join(dir_path, 'parameters.txt')
# Creates the database file on first run; the connection object is
# discarded without being closed.
sqlite3.connect(globalvars.database_path)
import errno
def make_sure_path_exists(path):
    # mkdir -p semantics: only "already exists" is ignored.
    try:
        os.makedirs(path)
    except OSError as exception:
        if exception.errno != errno.EEXIST:
            raise
make_sure_path_exists('data/')
root.mainloop()
| |
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module offers a class to enable your code to speak BGP protocol.
"""
import netaddr
from ryu.lib import hub
from ryu.services.protocols.bgp.core_manager import CORE_MANAGER
from ryu.services.protocols.bgp.signals.emit import BgpSignalBus
from ryu.services.protocols.bgp.api.base import call
from ryu.services.protocols.bgp.api.base import PREFIX
from ryu.services.protocols.bgp.api.base import EVPN_ROUTE_TYPE
from ryu.services.protocols.bgp.api.base import EVPN_ESI
from ryu.services.protocols.bgp.api.base import EVPN_ETHERNET_TAG_ID
from ryu.services.protocols.bgp.api.base import IP_ADDR
from ryu.services.protocols.bgp.api.base import MAC_ADDR
from ryu.services.protocols.bgp.api.base import NEXT_HOP
from ryu.services.protocols.bgp.api.base import ROUTE_DISTINGUISHER
from ryu.services.protocols.bgp.api.base import ROUTE_FAMILY
from ryu.services.protocols.bgp.api.base import EVPN_VNI
from ryu.services.protocols.bgp.api.base import TUNNEL_TYPE
from ryu.services.protocols.bgp.api.base import PMSI_TUNNEL_TYPE
from ryu.services.protocols.bgp.api.prefix import EVPN_MAC_IP_ADV_ROUTE
from ryu.services.protocols.bgp.api.prefix import EVPN_MULTICAST_ETAG_ROUTE
from ryu.services.protocols.bgp.api.prefix import TUNNEL_TYPE_VXLAN
from ryu.services.protocols.bgp.api.prefix import TUNNEL_TYPE_NVGRE
from ryu.services.protocols.bgp.api.prefix import (
PMSI_TYPE_NO_TUNNEL_INFO,
PMSI_TYPE_INGRESS_REP)
from ryu.services.protocols.bgp.operator import ssh
from ryu.services.protocols.bgp.rtconf.common import LOCAL_AS
from ryu.services.protocols.bgp.rtconf.common import ROUTER_ID
from ryu.services.protocols.bgp.rtconf.common import BGP_SERVER_PORT
from ryu.services.protocols.bgp.rtconf.common import DEFAULT_BGP_SERVER_PORT
from ryu.services.protocols.bgp.rtconf.common import (
DEFAULT_REFRESH_MAX_EOR_TIME, DEFAULT_REFRESH_STALEPATH_TIME)
from ryu.services.protocols.bgp.rtconf.common import DEFAULT_LABEL_RANGE
from ryu.services.protocols.bgp.rtconf.common import REFRESH_MAX_EOR_TIME
from ryu.services.protocols.bgp.rtconf.common import REFRESH_STALEPATH_TIME
from ryu.services.protocols.bgp.rtconf.common import LABEL_RANGE
from ryu.services.protocols.bgp.rtconf import neighbors
from ryu.services.protocols.bgp.rtconf import vrfs
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_IPV4
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_IPV6
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_VPNV4
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_VPNV6
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_EVPN
from ryu.services.protocols.bgp.rtconf.base import CAP_ENHANCED_REFRESH
from ryu.services.protocols.bgp.rtconf.base import CAP_FOUR_OCTET_AS_NUMBER
from ryu.services.protocols.bgp.rtconf.base import MULTI_EXIT_DISC
from ryu.services.protocols.bgp.rtconf.base import SITE_OF_ORIGINS
from ryu.services.protocols.bgp.rtconf.neighbors import DEFAULT_CAP_MBGP_IPV4
from ryu.services.protocols.bgp.rtconf.neighbors import DEFAULT_CAP_MBGP_VPNV4
from ryu.services.protocols.bgp.rtconf.neighbors import DEFAULT_CAP_MBGP_VPNV6
from ryu.services.protocols.bgp.rtconf.neighbors import DEFAULT_CAP_MBGP_EVPN
from ryu.services.protocols.bgp.rtconf.neighbors import (
DEFAULT_CAP_ENHANCED_REFRESH, DEFAULT_CAP_FOUR_OCTET_AS_NUMBER)
from ryu.services.protocols.bgp.rtconf.neighbors import DEFAULT_CONNECT_MODE
from ryu.services.protocols.bgp.rtconf.neighbors import PEER_NEXT_HOP
from ryu.services.protocols.bgp.rtconf.neighbors import PASSWORD
from ryu.services.protocols.bgp.rtconf.neighbors import IS_ROUTE_SERVER_CLIENT
from ryu.services.protocols.bgp.rtconf.neighbors import IS_NEXT_HOP_SELF
from ryu.services.protocols.bgp.rtconf.neighbors import CONNECT_MODE
from ryu.services.protocols.bgp.rtconf.neighbors import LOCAL_ADDRESS
from ryu.services.protocols.bgp.rtconf.neighbors import LOCAL_PORT
from ryu.services.protocols.bgp.rtconf.vrfs import SUPPORTED_VRF_RF
from ryu.services.protocols.bgp.info_base.base import Filter
from ryu.services.protocols.bgp.info_base.ipv4 import Ipv4Path
from ryu.services.protocols.bgp.info_base.ipv6 import Ipv6Path
from ryu.services.protocols.bgp.info_base.vpnv4 import Vpnv4Path
from ryu.services.protocols.bgp.info_base.vpnv6 import Vpnv6Path
from ryu.services.protocols.bgp.info_base.evpn import EvpnPath
# Alias kept so existing code that imports NEIGHBOR_CONF_MED keeps working.
NEIGHBOR_CONF_MED = MULTI_EXIT_DISC  # for backward compatibility
# Short public aliases for the VRF route-family constants.
RF_VPN_V4 = vrfs.VRF_RF_IPV4
RF_VPN_V6 = vrfs.VRF_RF_IPV6
RF_L2_EVPN = vrfs.VRF_RF_L2_EVPN
class EventPrefix(object):
    """
    Used to pass an update on any best remote path to
    best_path_change_handler.

    ================ ======================================================
    Attribute        Description
    ================ ======================================================
    remote_as        The AS number of a peer that caused this change
    route_dist       None in the case of IPv4 or IPv6 family
    prefix           A prefix was changed
    nexthop          The nexthop of the changed prefix
    label            MPLS label for VPNv4, VPNv6 or EVPN prefix
    path             An instance of ``info_base.base.Path`` subclass
    is_withdraw      True if this prefix has gone otherwise False
    ================ ======================================================
    """

    def __init__(self, path, is_withdraw):
        self.path = path
        self.is_withdraw = is_withdraw

    @property
    def remote_as(self):
        return self.path.source.remote_as

    @property
    def route_dist(self):
        # Only VPN/EVPN NLRIs carry a route distinguisher.
        if isinstance(self.path, (Vpnv4Path, Vpnv6Path, EvpnPath)):
            return self.path.nlri.route_dist
        return None

    @property
    def prefix(self):
        if isinstance(self.path, (Ipv4Path, Ipv6Path)):
            return '%s/%s' % (self.path.nlri.addr, self.path.nlri.length)
        if isinstance(self.path, (Vpnv4Path, Vpnv6Path, EvpnPath)):
            return self.path.nlri.prefix
        return None

    @property
    def nexthop(self):
        return self.path.nexthop

    @property
    def label(self):
        # Plain IPv4/IPv6 paths have no label; VPN/EVPN NLRIs may.
        if isinstance(self.path, (Vpnv4Path, Vpnv6Path, EvpnPath)):
            return getattr(self.path.nlri, 'label_list', None)
        return None
class BGPSpeaker(object):
    """Convenience API around the Ryu BGP core service.

    Constructing an instance starts the core BGP service; the instance
    then exposes methods to manage neighbors, advertised prefixes, VRFs,
    filters, attribute maps, and monitoring (BMP, SSH CLI).
    """

    def __init__(self, as_number, router_id,
                 bgp_server_port=DEFAULT_BGP_SERVER_PORT,
                 refresh_stalepath_time=DEFAULT_REFRESH_STALEPATH_TIME,
                 refresh_max_eor_time=DEFAULT_REFRESH_MAX_EOR_TIME,
                 best_path_change_handler=None,
                 peer_down_handler=None,
                 peer_up_handler=None,
                 ssh_console=False,
                 ssh_port=ssh.DEFAULT_SSH_PORT,
                 ssh_host=ssh.DEFAULT_SSH_HOST,
                 ssh_host_key=ssh.DEFAULT_SSH_HOST_KEY,
                 label_range=DEFAULT_LABEL_RANGE):
        """Create a new BGPSpeaker object with as_number and router_id to
        listen on bgp_server_port.

        ``as_number`` specifies an Autonomous Number. It must be an integer
        between 1 and 65535.

        ``router_id`` specifies BGP router identifier. It must be the
        string representation of an IPv4 address (e.g. 10.0.0.1).

        ``bgp_server_port`` specifies TCP listen port number. 179 is
        used if not specified.

        ``refresh_stalepath_time`` causes the BGP speaker to remove
        stale routes from the BGP table after the timer expires, even
        if the speaker does not receive a Router-Refresh End-of-RIB
        message. This feature is disabled (not implemented yet).

        ``refresh_max_eor_time`` causes the BGP speaker to generate a
        Route-Refresh End-of-RIB message if it was not able to
        generate one due to route flapping. This feature is disabled
        (not implemented yet).

        ``best_path_change_handler``, if specified, is called when any
        best remote path is changed due to an update message or remote
        peer down. The handler is supposed to take one argument, the
        instance of an EventPrefix class instance.

        ``peer_down_handler``, if specified, is called when BGP peering
        session goes down.

        ``peer_up_handler``, if specified, is called when BGP peering
        session goes up.

        ``ssh_console`` specifies whether or not SSH CLI need to be started.

        ``ssh_port`` specifies the port number for SSH CLI server.

        ``ssh_host`` specifies the IP address for SSH CLI server.

        ``ssh_host_key`` specifies the path to the host key added to
        the keys list used by SSH CLI server.

        ``label_range`` specifies the range of MPLS labels generated
        automatically.
        """
        super(BGPSpeaker, self).__init__()
        settings = {
            LOCAL_AS: as_number,
            ROUTER_ID: router_id,
            BGP_SERVER_PORT: bgp_server_port,
            REFRESH_STALEPATH_TIME: refresh_stalepath_time,
            REFRESH_MAX_EOR_TIME: refresh_max_eor_time,
            LABEL_RANGE: label_range,
        }
        self._core_start(settings)
        self._init_signal_listeners()
        self._best_path_change_handler = best_path_change_handler
        self._peer_down_handler = peer_down_handler
        self._peer_up_handler = peer_up_handler
        if ssh_console:
            # The SSH CLI runs in its own green thread.
            ssh_settings = {
                ssh.SSH_PORT: ssh_port,
                ssh.SSH_HOST: ssh_host,
                ssh.SSH_HOST_KEY: ssh_host_key,
            }
            hub.spawn(ssh.SSH_CLI_CONTROLLER.start, **ssh_settings)

    def _notify_peer_down(self, peer):
        # Forward a peer-down signal to the user handler, if any.
        remote_ip = peer.ip_address
        remote_as = peer.remote_as
        if self._peer_down_handler:
            self._peer_down_handler(remote_ip, remote_as)

    def _notify_peer_up(self, peer):
        # Identity is taken from the received OPEN message, which is
        # only available once the session is established.
        remote_ip = peer.protocol.recv_open_msg.bgp_identifier
        remote_as = peer.protocol.recv_open_msg.my_as
        if self._peer_up_handler:
            self._peer_up_handler(remote_ip, remote_as)

    def _notify_best_path_changed(self, path, is_withdraw):
        # Ignore locally-sourced paths and unsupported path types.
        if (not path.source
                or not isinstance(path, (Ipv4Path, Ipv6Path,
                                         Vpnv4Path, Vpnv6Path, EvpnPath))):
            return
        ev = EventPrefix(path, is_withdraw)
        if self._best_path_change_handler:
            self._best_path_change_handler(ev)

    def _init_signal_listeners(self):
        # Bridge core-service signals to the user-supplied handlers.
        CORE_MANAGER.get_core_service()._signal_bus.register_listener(
            BgpSignalBus.BGP_BEST_PATH_CHANGED,
            lambda _, info:
            self._notify_best_path_changed(info['path'],
                                           info['is_withdraw'])
        )
        CORE_MANAGER.get_core_service()._signal_bus.register_listener(
            BgpSignalBus.BGP_ADJ_DOWN,
            lambda _, info:
            self._notify_peer_down(info['peer'])
        )
        CORE_MANAGER.get_core_service()._signal_bus.register_listener(
            BgpSignalBus.BGP_ADJ_UP,
            lambda _, info:
            self._notify_peer_up(info['peer'])
        )

    def _core_start(self, settings):
        # Block until the core service has actually started.
        waiter = hub.Event()
        call('core.start', waiter=waiter, **settings)
        waiter.wait()

    def _serve_forever(self):
        # Placeholder; the core service runs in its own green threads.
        pass

    def shutdown(self):
        """ Shutdown BGP speaker
        """
        call('core.stop')

    def neighbor_add(self, address, remote_as,
                     enable_ipv4=DEFAULT_CAP_MBGP_IPV4,
                     enable_vpnv4=DEFAULT_CAP_MBGP_VPNV4,
                     enable_vpnv6=DEFAULT_CAP_MBGP_VPNV6,
                     enable_evpn=DEFAULT_CAP_MBGP_EVPN,
                     enable_enhanced_refresh=DEFAULT_CAP_ENHANCED_REFRESH,
                     enable_four_octet_as_number=DEFAULT_CAP_FOUR_OCTET_AS_NUMBER,
                     next_hop=None, password=None, multi_exit_disc=None,
                     site_of_origins=None, is_route_server_client=False,
                     is_next_hop_self=False, local_address=None,
                     local_port=None, local_as=None,
                     connect_mode=DEFAULT_CONNECT_MODE):
        """ This method registers a new neighbor. The BGP speaker tries to
        establish a bgp session with the peer (accepts a connection
        from the peer and also tries to connect to it).

        ``address`` specifies the IP address of the peer. It must be
        the string representation of an IP address. Only IPv4 is
        supported now.

        ``remote_as`` specifies the AS number of the peer. It must be
        an integer between 1 and 65535.

        ``enable_ipv4`` enables IPv4 address family for this
        neighbor. The default is True.

        ``enable_vpnv4`` enables VPNv4 address family for this
        neighbor. The default is False.

        ``enable_vpnv6`` enables VPNv6 address family for this
        neighbor. The default is False.

        ``enable_evpn`` enables Ethernet VPN address family for this
        neighbor. The default is False.

        ``enable_enhanced_refresh`` enables Enhanced Route Refresh for this
        neighbor. The default is False.

        ``enable_four_octet_as_number`` enables Four-Octet AS Number
        capability for this neighbor. The default is True.

        ``next_hop`` specifies the next hop IP address. If not
        specified, host's ip address to access to a peer is used.

        ``password`` is used for the MD5 authentication if it's
        specified. By default, the MD5 authentication is disabled.

        ``multi_exit_disc`` specifies multi exit discriminator (MED) value.
        The default is None and if not specified, MED value is
        not sent to the neighbor. It must be an integer.

        ``site_of_origins`` specifies site_of_origin values.
        This parameter must be a list of string.

        ``is_route_server_client`` specifies whether this neighbor is a
        router server's client or not.

        ``is_next_hop_self`` specifies whether the BGP speaker announces
        its own ip address to iBGP neighbor or not as path's next_hop address.

        ``local_address`` specifies Loopback interface address for
        iBGP peering.

        ``local_port`` specifies source TCP port for iBGP peering.

        ``local_as`` specifies local AS number per-peer.
        The default is the AS number of BGPSpeaker instance.

        ``connect_mode`` specifies how to connect to this neighbor.
        CONNECT_MODE_ACTIVE tries to connect from us.
        CONNECT_MODE_PASSIVE just listens and wait for the connection.
        CONNECT_MODE_BOTH use both methods.
        The default is CONNECT_MODE_BOTH.

        Raises ValueError if ``address`` is neither a valid IPv4 nor a
        valid IPv6 address.
        """
        bgp_neighbor = {
            neighbors.IP_ADDRESS: address,
            neighbors.REMOTE_AS: remote_as,
            PEER_NEXT_HOP: next_hop,
            PASSWORD: password,
            IS_ROUTE_SERVER_CLIENT: is_route_server_client,
            IS_NEXT_HOP_SELF: is_next_hop_self,
            CONNECT_MODE: connect_mode,
            CAP_ENHANCED_REFRESH: enable_enhanced_refresh,
            CAP_FOUR_OCTET_AS_NUMBER: enable_four_octet_as_number,
        }
        # v6 advertisement is available with only v6 peering
        if netaddr.valid_ipv4(address):
            bgp_neighbor[CAP_MBGP_IPV4] = enable_ipv4
            bgp_neighbor[CAP_MBGP_IPV6] = False
            bgp_neighbor[CAP_MBGP_VPNV4] = enable_vpnv4
            bgp_neighbor[CAP_MBGP_VPNV6] = enable_vpnv6
            bgp_neighbor[CAP_MBGP_EVPN] = enable_evpn
        elif netaddr.valid_ipv6(address):
            bgp_neighbor[CAP_MBGP_IPV4] = False
            bgp_neighbor[CAP_MBGP_IPV6] = True
            bgp_neighbor[CAP_MBGP_VPNV4] = False
            bgp_neighbor[CAP_MBGP_VPNV6] = False
            bgp_neighbor[CAP_MBGP_EVPN] = enable_evpn
        else:
            # Previously this case was silently ignored (FIXME in the
            # original); fail fast instead of registering a half-configured
            # neighbor.
            raise ValueError('Invalid address: %s' % address)
        if multi_exit_disc:
            bgp_neighbor[MULTI_EXIT_DISC] = multi_exit_disc
        if site_of_origins:
            bgp_neighbor[SITE_OF_ORIGINS] = site_of_origins
        if local_address:
            bgp_neighbor[LOCAL_ADDRESS] = local_address
        if local_port:
            bgp_neighbor[LOCAL_PORT] = local_port
        if local_as:
            bgp_neighbor[LOCAL_AS] = local_as
        call('neighbor.create', **bgp_neighbor)

    def neighbor_del(self, address):
        """ This method unregister the registered neighbor. If a session with
        the peer exists, the session will be closed.

        ``address`` specifies the IP address of the peer. It must be
        the string representation of an IP address.
        """
        bgp_neighbor = {
            neighbors.IP_ADDRESS: address,
        }
        call('neighbor.delete', **bgp_neighbor)

    def neighbor_reset(self, address):
        """ This method reset the registered neighbor.

        ``address`` specifies the IP address of the peer. It must be
        the string representation of an IP address.
        """
        bgp_neighbor = {
            neighbors.IP_ADDRESS: address,
        }
        call('core.reset_neighbor', **bgp_neighbor)

    def neighbor_update(self, address, conf_type, conf_value):
        """ This method changes the neighbor configuration.

        ``address`` specifies the IP address of the peer.

        ``conf_type`` specifies configuration type which you want to change.
        Currently ryu.services.protocols.bgp.bgpspeaker.MULTI_EXIT_DISC
        can be specified.

        ``conf_value`` specifies value for the configuration type.
        """
        assert conf_type == MULTI_EXIT_DISC or conf_type == CONNECT_MODE
        func_name = 'neighbor.update'
        attribute_param = {}
        if conf_type == MULTI_EXIT_DISC:
            attribute_param = {neighbors.MULTI_EXIT_DISC: conf_value}
        elif conf_type == CONNECT_MODE:
            attribute_param = {neighbors.CONNECT_MODE: conf_value}
        param = {neighbors.IP_ADDRESS: address,
                 neighbors.CHANGES: attribute_param}
        call(func_name, **param)

    def neighbor_state_get(self, address=None, format='json'):
        """ This method returns the state of peer(s) in a json
        format.

        ``address`` specifies the address of a peer. If not given, the
        state of all the peers return.

        ``format`` specifies the format of the response.
        This parameter must be 'json' or 'cli'.
        """
        show = {
            'params': ['neighbor', 'summary'],
            'format': format,
        }
        if address:
            show['params'].append(address)
        return call('operator.show', **show)

    def prefix_add(self, prefix, next_hop=None, route_dist=None):
        """ This method adds a new prefix to be advertised.

        ``prefix`` must be the string representation of an IP network
        (e.g., 10.1.1.0/24).

        ``next_hop`` specifies the next hop address for this
        prefix. This parameter is necessary for only VPNv4 and VPNv6
        address families.

        ``route_dist`` specifies a route distinguisher value. This
        parameter is necessary for only VPNv4 and VPNv6 address
        families.
        """
        func_name = 'network.add'
        networks = {
            PREFIX: prefix,
        }
        if next_hop:
            networks[NEXT_HOP] = next_hop
        if route_dist:
            # A route distinguisher means this is a VRF-local prefix.
            func_name = 'prefix.add_local'
            networks[ROUTE_DISTINGUISHER] = route_dist
            rf, p = self._check_rf_and_normalize(prefix)
            networks[ROUTE_FAMILY] = rf
            networks[PREFIX] = p
            if rf == vrfs.VRF_RF_IPV6 and netaddr.valid_ipv4(next_hop):
                # convert the next_hop to IPv4-Mapped IPv6 Address
                networks[NEXT_HOP] = \
                    str(netaddr.IPAddress(next_hop).ipv6())
        return call(func_name, **networks)

    def prefix_del(self, prefix, route_dist=None):
        """ This method deletes a advertised prefix.

        ``prefix`` must be the string representation of an IP network
        (e.g., 10.1.1.0/24).

        ``route_dist`` specifies a route distinguisher value. This
        parameter is necessary for only VPNv4 and VPNv6 address
        families.
        """
        func_name = 'network.del'
        networks = {
            PREFIX: prefix,
        }
        if route_dist:
            func_name = 'prefix.delete_local'
            networks[ROUTE_DISTINGUISHER] = route_dist
            rf, p = self._check_rf_and_normalize(prefix)
            networks[ROUTE_FAMILY] = rf
            networks[PREFIX] = p
        call(func_name, **networks)

    def evpn_prefix_add(self, route_type, route_dist, esi=0,
                        ethernet_tag_id=None, mac_addr=None, ip_addr=None,
                        vni=None, next_hop=None, tunnel_type=None,
                        pmsi_tunnel_type=None):
        """ This method adds a new EVPN route to be advertised.

        ``route_type`` specifies one of the EVPN route type name. The
        supported route types are EVPN_MAC_IP_ADV_ROUTE and
        EVPN_MULTICAST_ETAG_ROUTE.

        ``route_dist`` specifies a route distinguisher value.

        ``esi`` is an integer value to specify the Ethernet Segment
        Identifier. 0 is the default and denotes a single-homed site.

        ``ethernet_tag_id`` specifies the Ethernet Tag ID.

        ``mac_addr`` specifies a MAC address to advertise.

        ``ip_addr`` specifies an IPv4 or IPv6 address to advertise.

        ``vni`` specifies an Virtual Network Identifier for VXLAN
        or Virtual Subnet Identifier for NVGRE.
        If tunnel_type is not 'vxlan' or 'nvgre', this field is ignored.

        ``next_hop`` specifies the next hop address for this prefix.

        ``tunnel_type`` specifies the data plane encapsulation type
        to advertise. By the default, this encapsulation attribute is
        not advertised.

        ``pmsi_tunnel_type`` specifies the type of the PMSI tunnel attribute
        used to encode the multicast tunnel identifier.
        This field is advertised only if route_type is
        EVPN_MULTICAST_ETAG_ROUTE.

        Raises ValueError for an unsupported ``route_type`` or
        ``pmsi_tunnel_type``.
        """
        func_name = 'evpn_prefix.add_local'
        # Check the default values
        if not next_hop:
            next_hop = '0.0.0.0'
        # Set required arguments
        kwargs = {EVPN_ROUTE_TYPE: route_type,
                  ROUTE_DISTINGUISHER: route_dist,
                  NEXT_HOP: next_hop}
        # Set optional arguments
        if tunnel_type:
            kwargs[TUNNEL_TYPE] = tunnel_type
        # Set route type specific arguments
        if route_type == EVPN_MAC_IP_ADV_ROUTE:
            kwargs.update({
                EVPN_ESI: esi,
                EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
                MAC_ADDR: mac_addr,
                IP_ADDR: ip_addr,
            })
            # Set tunnel type specific arguments
            if tunnel_type in [TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE]:
                kwargs[EVPN_VNI] = vni
        elif route_type == EVPN_MULTICAST_ETAG_ROUTE:
            kwargs.update({
                EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
                IP_ADDR: ip_addr,
            })
            # Set PMSI Tunnel Attribute arguments
            if pmsi_tunnel_type in [
                    PMSI_TYPE_NO_TUNNEL_INFO,
                    PMSI_TYPE_INGRESS_REP]:
                kwargs[PMSI_TUNNEL_TYPE] = pmsi_tunnel_type
            elif pmsi_tunnel_type is not None:
                raise ValueError('Unsupported PMSI tunnel type: %s' %
                                 pmsi_tunnel_type)
        else:
            raise ValueError('Unsupported EVPN route type: %s' % route_type)
        call(func_name, **kwargs)

    def evpn_prefix_del(self, route_type, route_dist, esi=0,
                        ethernet_tag_id=None, mac_addr=None, ip_addr=None):
        """ This method deletes an advertised EVPN route.

        ``route_type`` specifies one of the EVPN route type name.

        ``route_dist`` specifies a route distinguisher value.

        ``esi`` is an integer value to specify the Ethernet Segment
        Identifier. 0 is the default and denotes a single-homed site.

        ``ethernet_tag_id`` specifies the Ethernet Tag ID.

        ``mac_addr`` specifies a MAC address to advertise.

        ``ip_addr`` specifies an IPv4 or IPv6 address to advertise.

        Raises ValueError for an unsupported ``route_type``.
        """
        func_name = 'evpn_prefix.delete_local'
        # Set required arguments
        kwargs = {EVPN_ROUTE_TYPE: route_type,
                  ROUTE_DISTINGUISHER: route_dist}
        # Set route type specific arguments
        if route_type == EVPN_MAC_IP_ADV_ROUTE:
            kwargs.update({
                EVPN_ESI: esi,
                EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
                MAC_ADDR: mac_addr,
                IP_ADDR: ip_addr,
            })
        elif route_type == EVPN_MULTICAST_ETAG_ROUTE:
            kwargs.update({
                EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
                IP_ADDR: ip_addr,
            })
        else:
            raise ValueError('Unsupported EVPN route type: %s' % route_type)
        call(func_name, **kwargs)

    def vrf_add(self, route_dist, import_rts, export_rts, site_of_origins=None,
                route_family=RF_VPN_V4, multi_exit_disc=None):
        """ This method adds a new vrf used for VPN.

        ``route_dist`` specifies a route distinguisher value.

        ``import_rts`` specifies a list of route targets to be imported.

        ``export_rts`` specifies a list of route targets to be exported.

        ``site_of_origins`` specifies site_of_origin values.
        This parameter must be a list of string.

        ``route_family`` specifies route family of the VRF.
        This parameter must be RF_VPN_V4, RF_VPN_V6 or RF_L2_EVPN.

        ``multi_exit_disc`` specifies multi exit discriminator (MED) value.
        It must be an integer.
        """
        assert route_family in SUPPORTED_VRF_RF,\
            'route_family must be RF_VPN_V4, RF_VPN_V6 or RF_L2_EVPN'
        vrf = {
            vrfs.ROUTE_DISTINGUISHER: route_dist,
            vrfs.IMPORT_RTS: import_rts,
            vrfs.EXPORT_RTS: export_rts,
            vrfs.SITE_OF_ORIGINS: site_of_origins,
            vrfs.VRF_RF: route_family,
            vrfs.MULTI_EXIT_DISC: multi_exit_disc,
        }
        call('vrf.create', **vrf)

    def vrf_del(self, route_dist):
        """ This method deletes the existing vrf.

        ``route_dist`` specifies a route distinguisher value.
        """
        vrf = {vrfs.ROUTE_DISTINGUISHER: route_dist}
        call('vrf.delete', **vrf)

    def vrfs_get(self, subcommand='routes', route_dist=None,
                 route_family='all', format='json'):
        """ This method returns the existing vrfs.

        ``subcommand`` specifies the subcommand.

          'routes': shows routes present for vrf

          'summary': shows configuration and summary of vrf

        ``route_dist`` specifies a route distinguisher value.
        If route_family is not 'all', this value must be specified.

        ``route_family`` specifies route family of the VRF.
        This parameter must be RF_VPN_V4, RF_VPN_V6 or RF_L2_EVPN
        or 'all'.

        ``format`` specifies the format of the response.
        This parameter must be 'json' or 'cli'.
        """
        show = {
            'format': format,
        }
        if route_family in SUPPORTED_VRF_RF:
            assert route_dist is not None
            show['params'] = ['vrf', subcommand, route_dist, route_family]
        else:
            show['params'] = ['vrf', subcommand, 'all']
        return call('operator.show', **show)

    def rib_get(self, family='all', format='json'):
        """ This method returns the BGP routing information in a json
        format. This will be improved soon.

        ``family`` specifies the address family of the RIB (e.g. 'ipv4').

        ``format`` specifies the format of the response.
        This parameter must be 'json' or 'cli'.
        """
        show = {
            'params': ['rib', family],
            'format': format
        }
        return call('operator.show', **show)

    def neighbor_get(self, route_type, address, format='json'):
        """ This method returns the BGP adj-RIB-in/adj-RIB-out information
        in a json format.

        ``route_type`` This parameter is necessary for only received-routes
        and sent-routes.

          received-routes : paths received and not withdrawn by given peer

          sent-routes : paths sent and not withdrawn to given peer

        ``address`` specifies the IP address of the peer. It must be
        the string representation of an IP address.

        ``format`` specifies the format of the response.
        This parameter must be 'json' or 'cli'.
        """
        show = {
            'format': format,
        }
        if route_type == 'sent-routes' or route_type == 'received-routes':
            show['params'] = ['neighbor', route_type, address, 'all']
        else:
            # Fall back to received-routes for any other route_type value.
            show['params'] = ['neighbor', 'received-routes', address, 'all']
        return call('operator.show', **show)

    def neighbors_get(self, format='json'):
        """ This method returns a list of the BGP neighbors.

        ``format`` specifies the format of the response.
        This parameter must be 'json' or 'cli'.
        """
        show = {
            'params': ['neighbor'],
            'format': format,
        }
        return call('operator.show', **show)

    def _set_filter(self, filter_type, address, filters):
        assert filter_type in ('in', 'out'),\
            'filter type must be \'in\' or \'out\''
        # Normalize None BEFORE validating the items; the original code
        # iterated ``filters`` in the assert first, which raised TypeError
        # (not the intended normalization) when filters was None.
        if filters is None:
            filters = []
        assert all(isinstance(f, Filter) for f in filters),\
            'all the items in filters must be an instance of Filter sub-class'
        func_name = 'neighbor.' + filter_type + '_filter.set'
        param = {
            neighbors.IP_ADDRESS: address,
        }
        if filter_type == 'in':
            param[neighbors.IN_FILTER] = filters
        else:
            param[neighbors.OUT_FILTER] = filters
        call(func_name, **param)

    def out_filter_set(self, address, filters):
        """ This method sets out-filter to neighbor.

        ``address`` specifies the IP address of the peer.

        ``filters`` specifies a filter list to filter the path advertisement.
        The contents must be an instance of Filter sub-class

        If you want to define out-filter that send only a particular
        prefix to neighbor, filters can be created as follows::

            p = PrefixFilter('10.5.111.0/24',
                             policy=PrefixFilter.POLICY_PERMIT)

            all = PrefixFilter('0.0.0.0/0',
                               policy=PrefixFilter.POLICY_DENY)

            pList = [p, all]

            self.bgpspeaker.out_filter_set(neighbor_address, pList)

        .. Note::

            out-filter evaluates paths in the order of Filter in the pList.
        """
        self._set_filter('out', address, filters)

    def out_filter_get(self, address):
        """ This method gets out-filter setting from the specified neighbor.

        ``address`` specifies the IP address of the peer.

        Returns a list object containing an instance of Filter sub-class
        """
        func_name = 'neighbor.out_filter.get'
        param = {
            neighbors.IP_ADDRESS: address,
        }
        return call(func_name, **param)

    def in_filter_set(self, address, filters):
        """This method sets in-bound filters to a neighbor.

        ``address`` specifies the IP address of the neighbor

        ``filters`` specifies filter list applied before advertised paths are
        imported to the global rib. All the items in the list must be an
        instance of Filter sub-class.
        """
        self._set_filter('in', address, filters)

    def in_filter_get(self, address):
        """This method gets in-bound filters of the specified neighbor.

        ``address`` specifies the IP address of the neighbor.

        Returns a list object containing an instance of Filter sub-class
        """
        func_name = 'neighbor.in_filter.get'
        param = {
            neighbors.IP_ADDRESS: address,
        }
        return call(func_name, **param)

    def bmp_server_add(self, address, port):
        """This method registers a new BMP (BGP monitoring Protocol)
        server. The BGP speaker starts to send BMP messages to the
        server. Currently, only one BMP server can be registered.

        ``address`` specifies the IP address of a BMP server.

        ``port`` specifies the listen port number of a BMP server.
        """
        func_name = 'bmp.start'
        param = {
            'host': address,
            'port': port,
        }
        call(func_name, **param)

    def bmp_server_del(self, address, port):
        """ This method unregister the registered BMP server.

        ``address`` specifies the IP address of a BMP server.

        ``port`` specifies the listen port number of a BMP server.
        """
        func_name = 'bmp.stop'
        param = {
            'host': address,
            'port': port,
        }
        call(func_name, **param)

    def attribute_map_set(self, address, attribute_maps,
                          route_dist=None, route_family=RF_VPN_V4):
        """This method sets attribute mapping to a neighbor.
        attribute mapping can be used when you want to apply
        attribute to BGPUpdate under specific conditions.

        ``address`` specifies the IP address of the neighbor

        ``attribute_maps`` specifies attribute_map list that are used
        before paths are advertised. All the items in the list must
        be an instance of AttributeMap class

        ``route_dist`` specifies route dist in which attribute_maps
        are added.

        ``route_family`` specifies route family of the VRF.
        This parameter must be RF_VPN_V4 or RF_VPN_V6.

        We can set AttributeMap to a neighbor as follows::

            pref_filter = PrefixFilter('192.168.103.0/30',
                                       PrefixFilter.POLICY_PERMIT)

            attribute_map = AttributeMap([pref_filter],
                                         AttributeMap.ATTR_LOCAL_PREF, 250)

            speaker.attribute_map_set('192.168.50.102', [attribute_map])
        """
        assert route_family in (RF_VPN_V4, RF_VPN_V6),\
            'route_family must be RF_VPN_V4 or RF_VPN_V6'
        func_name = 'neighbor.attribute_map.set'
        param = {
            neighbors.IP_ADDRESS: address,
            neighbors.ATTRIBUTE_MAP: attribute_maps,
        }
        if route_dist is not None:
            param[vrfs.ROUTE_DISTINGUISHER] = route_dist
            param[vrfs.VRF_RF] = route_family
        call(func_name, **param)

    def attribute_map_get(self, address, route_dist=None,
                          route_family=RF_VPN_V4):
        """This method gets in-bound filters of the specified neighbor.

        ``address`` specifies the IP address of the neighbor.

        ``route_dist`` specifies route distinguisher that has attribute_maps.

        ``route_family`` specifies route family of the VRF.
        This parameter must be RF_VPN_V4 or RF_VPN_V6.

        Returns a list object containing an instance of AttributeMap
        """
        assert route_family in (RF_VPN_V4, RF_VPN_V6),\
            'route_family must be RF_VPN_V4 or RF_VPN_V6'
        func_name = 'neighbor.attribute_map.get'
        param = {
            neighbors.IP_ADDRESS: address,
        }
        if route_dist is not None:
            param[vrfs.ROUTE_DISTINGUISHER] = route_dist
            param[vrfs.VRF_RF] = route_family
        return call(func_name, **param)

    @staticmethod
    def _check_rf_and_normalize(prefix):
        """ check prefix's route_family and if the address is
        IPv6 address, return IPv6 route_family and normalized IPv6 address.
        If the address is IPv4 address, return IPv4 route_family
        and the prefix itself.
        """
        ip, masklen = prefix.split('/')
        if netaddr.valid_ipv6(ip):
            # normalize IPv6 address
            ipv6_prefix = str(netaddr.IPAddress(ip)) + '/' + masklen
            return vrfs.VRF_RF_IPV6, ipv6_prefix
        else:
            return vrfs.VRF_RF_IPV4, prefix
| |
"""Mailcap file handling. See RFC 1524."""
import os
__all__ = ["getcaps","findmatch"]
# Part 1: top-level interface.
def getcaps():
    """Return a dictionary containing the mailcap database.

    The dictionary maps a MIME type (in all lowercase, e.g. 'text/plain')
    to a list of dictionaries corresponding to mailcap entries. The list
    collects all the entries for that MIME type from all available mailcap
    files. Each dictionary contains key-value pairs for that MIME type,
    where the viewing command is stored with the key "view".
    """
    caps = {}
    for mailcap in listmailcapfiles():
        try:
            fp = open(mailcap, 'r')
        except IOError:
            # Missing or unreadable files are simply skipped.
            continue
        # Close the file even if parsing raises (the original leaked the
        # handle in that case).
        with fp:
            morecaps = readmailcapfile(fp)
        for key, value in morecaps.items():
            if key not in caps:
                caps[key] = value
            else:
                # Later files extend, never replace, earlier entries.
                caps[key] = caps[key] + value
    return caps
def listmailcapfiles():
    """Return a list of all mailcap files found on the system."""
    # XXX Actually, this is Unix-specific
    if 'MAILCAPS' in os.environ:
        # $MAILCAPS is a colon-separated list of mailcap file paths.
        # (The original bound this to a local named ``str``, shadowing
        # the builtin.)
        mailcaps = os.environ['MAILCAPS'].split(':')
    else:
        if 'HOME' in os.environ:
            home = os.environ['HOME']
        else:
            # Don't bother with getpwuid()
            home = '.'  # Last resort
        mailcaps = [home + '/.mailcap', '/etc/mailcap',
                    '/usr/etc/mailcap', '/usr/local/etc/mailcap']
    return mailcaps
# Part 2: the parser.
def readmailcapfile(fp):
    """Read a mailcap file and return a dictionary keyed by MIME type.

    Each MIME type is mapped to an entry consisting of a list of
    dictionaries; the list will contain more than one such dictionary
    if a given MIME type appears more than once in the mailcap file.
    Each dictionary contains key-value pairs for that MIME type, where
    the viewing command is stored with the key "view".
    """
    caps = {}
    while True:  # was ``while 1`` (Python 2 idiom)
        line = fp.readline()
        if not line:
            break
        # Ignore comments and blank lines
        if line[0] == '#' or line.strip() == '':
            continue
        nextline = line
        # Join continuation lines: a trailing backslash-newline splices
        # the following physical line onto this logical line.
        while nextline[-2:] == '\\\n':
            nextline = fp.readline()
            if not nextline:
                # EOF in the middle of a continuation: pretend a final
                # newline so the join below terminates cleanly.
                nextline = '\n'
            line = line[:-2] + nextline
        # Parse the line
        key, fields = parseline(line)
        if not (key and fields):
            continue
        # Normalize the key: strip whitespace around each '/'-separated
        # component and lowercase the whole type.
        key = '/'.join(t.strip() for t in key.split('/')).lower()
        # Update the database
        caps.setdefault(key, []).append(fields)
    return caps
def parseline(line):
    """Parse one entry in a mailcap file and return a dictionary.

    The viewing command is stored as the value with the key "view",
    and the rest of the fields produce key-value pairs in the dict.
    """
    fields = []
    i, n = 0, len(line)
    while i < n:
        field, i = parsefield(line, i, n)
        fields.append(field)
        i += 1  # skip the terminating semicolon
    if len(fields) < 2:
        # An entry needs at least a MIME type and a view command.
        return None, None
    key, view = fields[0], fields[1]
    entry = {'view': view}
    for field in fields[2:]:
        eq = field.find('=')
        if eq < 0:
            # Flag field (e.g. "needsterminal"): key only, empty value.
            fkey, fvalue = field, ""
        else:
            fkey = field[:eq].strip()
            fvalue = field[eq + 1:].strip()
        # The first occurrence of a key wins; duplicates are ignored.
        entry.setdefault(fkey, fvalue)
    return key, entry
def parsefield(line, i, n):
    """Separate one key-value pair in a mailcap entry."""
    start = i
    while i < n:
        ch = line[i]
        if ch == ';':
            # Unescaped semicolon terminates the field.
            break
        # A backslash escapes the next character, so skip both.
        i += 2 if ch == '\\' else 1
    return line[start:i].strip(), i
# Part 3: using the database.
def findmatch(caps, MIMEtype, key='view', filename="/dev/null", plist=None):
    """Find a match for a mailcap entry.

    Return a tuple containing the command line, and the mailcap entry
    used; (None, None) if no match is found. This may invoke the
    'test' command of several matching entries before deciding which
    entry to use.

    *plist* defaults to an empty list (the original used a mutable
    default argument, an anti-pattern even when never mutated).
    """
    if plist is None:
        plist = []
    entries = lookup(caps, MIMEtype, key)
    # XXX This code should somehow check for the needsterminal flag.
    for e in entries:
        if 'test' in e:
            # NOTE(review): the 'test' command comes from a mailcap file
            # and is run through the shell; mailcap files must be trusted.
            test = subst(e['test'], filename, plist)
            if test and os.system(test) != 0:
                continue
        command = subst(e[key], MIMEtype, filename, plist)
        return command, e
    return None, None
def lookup(caps, MIMEtype, key=None):
    """Return the mailcap entries in *caps* matching *MIMEtype*.

    Exact-type entries come first, followed by wildcard ('type/*')
    entries.  If *key* is given, only entries containing that key are
    returned.
    """
    entries = []
    if MIMEtype in caps:
        entries = entries + caps[MIMEtype]
    MIMEtypes = MIMEtype.split('/')
    MIMEtype = MIMEtypes[0] + '/*'
    if MIMEtype in caps:
        entries = entries + caps[MIMEtype]
    if key is not None:
        # Return a real list, not a lazy ``filter`` object: the original
        # Python 2 relic could only be iterated once and had no len().
        entries = [e for e in entries if key in e]
    return entries
def subst(field, MIMEtype, filename, plist=[]):
    """Expand the %-escapes in a mailcap command *field*.

    %s -> *filename*, %t -> *MIMEtype*, %% -> a literal '%',
    %{name} -> the value of ``name`` in *plist*, any other %x is copied
    through unchanged as '%x'.  A backslash escapes the next character.
    *plist* is only read (via findparam), never mutated, so the mutable
    default is harmless here.
    """
    # XXX Actually, this is Unix-specific
    res = ''
    i, n = 0, len(field)
    while i < n:
        c = field[i]; i = i+1
        if c != '%':
            if c == '\\':
                # Take the escaped character literally; the slice form
                # yields '' (not IndexError) on a trailing backslash.
                c = field[i:i+1]; i = i+1
            res = res + c
        else:
            # Character after '%' selects the substitution.
            c = field[i]; i = i+1
            if c == '%':
                res = res + c
            elif c == 's':
                res = res + filename
            elif c == 't':
                res = res + MIMEtype
            elif c == '{':
                # Collect the parameter name up to the closing '}'.
                start = i
                while i < n and field[i] != '}':
                    i = i+1
                name = field[start:i]
                i = i+1
                res = res + findparam(name, plist)
            # XXX To do:
            # %n == number of parts if type is multipart/*
            # %F == list of alternating type and filename for parts
            else:
                # Unknown escape: pass it through verbatim.
                res = res + '%' + c
    return res
def findparam(name, plist):
    """Return the value of parameter *name* from *plist*, or ''.

    Entries in *plist* look like 'name=value'; matching is
    case-insensitive on the name part.
    """
    target = name.lower() + '='
    size = len(target)
    return next((p[size:] for p in plist if p[:size].lower() == target), '')
# Part 4: test program.
def test():
    """Command-line driver.

    With no arguments, dump the mailcap database; otherwise treat
    argv as (MIMEtype, file) pairs and run the matching viewer.
    """
    import sys
    caps = getcaps()
    if not sys.argv[1:]:
        show(caps)
        return
    for i in range(1, len(sys.argv), 2):
        args = sys.argv[i:i + 2]
        if len(args) < 2:
            print("usage: mailcap [MIMEtype file] ...")
            return
        MIMEtype = args[0]
        file = args[1]
        command, e = findmatch(caps, MIMEtype, 'view', file)
        if not command:
            # Bug fix: the original printed the builtin ``type`` instead
            # of the MIME type being looked up.
            print("No viewer found for", MIMEtype)
        else:
            print("Executing:", command)
            sts = os.system(command)
            if sts:
                print("Exit status:", sts)
def show(caps):
    """Print the list of mailcap files and the entries in *caps*."""
    print("Mailcap files:")
    for fn in listmailcapfiles():
        print("\t" + fn)
    print()
    if not caps:
        caps = getcaps()
    print("Mailcap entries:")
    print()
    # Bug fix: dict views have no .sort() in Python 3; the original
    # ``caps.keys().sort()`` raised AttributeError. Use sorted() instead.
    for mtype in sorted(caps):
        print(mtype)
        for e in caps[mtype]:
            for k in sorted(e):
                print(" %-15s" % k, e[k])
            print()
# Run the command-line demo when executed as a script.
if __name__ == '__main__':
    test()
| |
from app.api.models.LXDModule import LXDModule
import logging
# NOTE(review): rebinding the name ``logging`` to a Logger instance shadows
# the imported module for the rest of this file; the conventional form is
# ``log = logging.getLogger(__name__)``. Renaming would require touching
# every use below, so only flagged here.
logging = logging.getLogger(__name__)
class LXCContainer(LXDModule):
    """High-level wrapper around a single LXD container (instance).

    Collects the desired container state in ``self.data`` (the payload sent
    to the LXD REST API) and exposes lifecycle operations on it.
    """

    def __init__(self, input):
        """Connect to LXD and pre-populate ``self.data`` from *input*.

        :param input: dict describing the container; ``name`` is required.
        :raises ValueError: if ``name`` is missing.
        """
        self.data = {}
        self.remoteHost = '127.0.0.1'
        if not input.get('name'):
            logging.error('Container name is required for any container operation')
            raise ValueError('Missing container name.')
        self.setName(input.get('name'))
        logging.info('Connecting to LXD')
        super(LXCContainer, self).__init__(remoteHost=self.remoteHost)
        # Start from the live config/devices of an existing container so a
        # partial update does not clobber unrelated settings.
        if self.client.instances.exists(self.data.get('name')):
            existing = self.info()
            self.data['config'] = existing['config']
            self.data['devices'] = existing['devices']
        if input.get('image'):
            self.setImageType(input.get('image'))
        if input.get('profiles'):
            self.setProfile(input.get('profiles'))
        if input.get('ephemeral'):
            self.setEphemeral(input.get('ephemeral'))
        if input.get('description'):
            self.setDescription(input.get('description'))
        if input.get('cpu'):
            self.setCPU(input.get('cpu'))
        if input.get('memory'):
            self.setMemory(input.get('memory'))
        if input.get('newContainer'):
            self.setNewContainer(input.get('newContainer'))
        if input.get('imageAlias'):
            self.setImageAlias(input.get('imageAlias'))
        if input.get('autostart') is not None:
            self.setBootType(input.get('autostart'))
        else:
            self.setBootType(True)
        # 'stateful' (when present) overrides any 'ephemeral' flag set above.
        if input.get('stateful') is not None:
            self.setEphemeral(not input.get('stateful'))
        else:
            self.setEphemeral(False)
        if input.get('newName'):
            self.setNewName(input.get('newName'))
        if input.get('config'):
            self.setConfig(input.get('config'))
        if input.get('devices'):
            self.setDevices(input.get('devices'))

    def setImageType(self, input):
        """Resolve *input* as an image alias or fingerprint and store it as the source.

        :raises ValueError: if no local image matches.
        """
        logging.debug('Checking if image {} exists'.format(input))
        # hasImage() returns the key name ('alias' or 'fingerprint') or falsy.
        tempImageType = self.hasImage(input)
        if not tempImageType:
            logging.error('Image with alias or fingerprint {} not found'.format(input))
            raise ValueError('Image with alias or fingerprint {} not found'.format(input))
        if not self.data.get('source'):
            self.data['source'] = {'type': 'image'}
        self.data['source'][tempImageType] = input

    def setName(self, input):
        """Set the container name."""
        logging.debug('Setting image name to {}'.format(input))
        self.data['name'] = input

    def setDescription(self, input):
        """Set the container description."""
        logging.debug('Setting image description as {}'.format(input))
        self.data['description'] = input

    def setProfile(self, input):
        """Set the list of LXD profiles to apply."""
        logging.debug('Setting image profiles as {}'.format(input))
        self.data['profiles'] = input

    def initConfig(self):
        """Ensure ``self.data['config']`` exists before writing keys into it."""
        if not self.data.get('config', None):
            self.data['config'] = {}

    def setCPU(self, input):
        """Apply CPU limits: core count if supported, else an allowance percentage."""
        self.initConfig()
        if LXDModule().setLimitsCPU():
            if input.get('cores'):
                logging.debug('Set CPU count to {}'.format(input.get('cores')))
                self.data['config']['limits.cpu'] = '{}'.format(input.get('cores'))
        else:
            if input.get('percentage'):
                if input.get('hardLimitation'):
                    # Hard cap: N ms of CPU time per 100 ms window.
                    self.data['config']['limits.cpu.allowance'] = '{}ms/100ms'.format(input.get('percentage'))
                else:
                    # Soft share expressed as a percentage.
                    self.data['config']['limits.cpu.allowance'] = '{}%'.format(input.get('percentage'))
                logging.debug('CPU allowance limit set to {}'.format(self.data['config']['limits.cpu.allowance']))

    def setMemory(self, input):
        """Apply a memory limit in MB, enforced hard or soft."""
        self.initConfig()
        self.data['config']['limits.memory'] = '{}MB'.format(input.get('sizeInMB'))
        self.data['config']['limits.memory.enforce'] = 'hard' if input.get('hardLimitation') else 'soft'
        logging.debug('Memory limit set to {} with restrictions set to {}'.format(
            self.data['config']['limits.memory'],
            self.data['config']['limits.memory.enforce']
        ))

    def setNewContainer(self, input):
        """Remember the target container name used by clone()/move()."""
        self.data['newContainer'] = input

    def setDevices(self, input):
        """Replace the container's device map."""
        self.data['devices'] = input

    def setImageAlias(self, input):
        """Remember the alias to attach to the image produced by export()."""
        logging.debug('Setting image alias as {}'.format(input))
        self.data['imageAlias'] = input

    def setBootType(self, input):
        """Enable/disable autostart on host boot (LXD expects '1'/'0' strings)."""
        self.initConfig()
        self.data['config']['boot.autostart'] = '1' if input else '0'
        logging.debug('Setting autostart boot type to {}'.format(input))

    def setEphemeral(self, input):
        """Mark the container as ephemeral (deleted on stop) or persistent.

        Note: the original file defined setEphemeral twice; the earlier
        definition (which skipped initConfig) was shadowed by this one and
        has been removed.
        """
        self.initConfig()
        self.data['ephemeral'] = input
        logging.debug('Setting container as ephemeral {}'.format(input))

    def setNewName(self, input):
        """Remember the new name used by rename()."""
        self.initConfig()
        logging.debug('Setting new container name as: {}'.format(input))
        self.data['newName'] = input

    def setConfig(self, input):
        """Merge arbitrary key/value pairs into the container config."""
        logging.debug('Setting key-value for container config')
        self.initConfig()
        self.data['config'].update(input)

    def info(self):
        """Return container metadata augmented with live state metrics.

        :raises ValueError: wrapping any underlying API error.
        """
        try:
            logging.info('Reading container {} information'.format(self.data.get('name')))
            c = self.client.instances.get(self.data.get('name'))
            container = self.client.api.instances[self.data.get('name')].get().json()['metadata']
            # Fetch the state once instead of once per attribute (each
            # state() call is a round-trip to the LXD API).
            state = c.state()
            container['cpu'] = state.cpu
            container['memory'] = state.memory
            container['network'] = state.network
            container['processes'] = state.processes
            container['pid'] = state.pid
            container['disk'] = state.disk
            return container
        except Exception as e:
            logging.error('Failed to retrieve information for container {}'.format(self.data.get('name')))
            logging.exception(e)
            raise ValueError(e)

    def create(self, waitIt=True):
        """Create the container, starting it when autostart is enabled."""
        try:
            instanceType = ''
            # Match the source fingerprint against local images to pick the
            # instance type (container vs virtual-machine).
            for image in LXDModule().listLocalImages():
                if image["fingerprint"] == self.data['source']['fingerprint']:
                    instanceType = image["type"]
                    break
            logging.info('Creating container {}'.format(self.data.get('name')))
            self.data['type'] = instanceType
            self.client.instances.create(self.data, wait=waitIt)
            if self.data['config']['boot.autostart'] == '1':
                self.start(waitIt)
            return self.info()
        except Exception as e:
            logging.error('Failed to create container {}'.format(self.data.get('name')))
            logging.exception(e)
            raise ValueError(e)

    def delete(self, force=False):
        """Delete the container; an ephemeral one is only stopped (LXD removes it)."""
        try:
            logging.info('Deleting container with {} enforcement set to {}'.format(self.data.get('name'), force))
            container = self.client.instances.get(self.data.get('name'))
            if self.info().get('ephemeral'):
                # Stopping an ephemeral container makes LXD delete it.
                container.stop(wait=True)
                return
            elif force and self.info().get('status') == 'Running':
                container.stop(wait=True)
            container.delete()
        except Exception as e:
            logging.error('Failed to delete container {}'.format(self.data.get('name')))
            logging.exception(e)
            raise ValueError(e)

    def update(self):
        """Push config/profiles/description changes; rename afterwards if requested."""
        try:
            logging.info('Updating container {}'.format(self.data.get('name')))
            container = self.client.instances.get(self.data.get('name'))
            if self.data.get('config'):
                container.config = self.data.get('config')
            if self.data.get('profiles'):
                container.profiles = self.data.get('profiles')
            if self.data.get('description'):
                container.description = self.data.get('description')
            container.save(True)
            if self.data.get('newName'):
                self.rename()
            return self.info()
        except Exception as e:
            logging.error('Failed to update container {}'.format(self.data.get('name')))
            logging.exception(e)
            raise ValueError(e)

    def start(self, waitIt=True):
        """Start the container."""
        try:
            logging.info('Starting container {}'.format(self.data.get('name')))
            container = self.client.instances.get(self.data.get('name'))
            container.start(wait=waitIt)
        except Exception as e:
            logging.error('Failed to start container {}'.format(self.data.get('name')))
            logging.exception(e)
            raise ValueError(e)

    def stop(self, waitIt=True):
        """Stop the container."""
        try:
            logging.info('Stopping container {}'.format(self.data.get('name')))
            container = self.client.instances.get(self.data.get('name'))
            container.stop(wait=waitIt)
        except Exception as e:
            logging.error('Failed to stop container {}'.format(self.data.get('name')))
            logging.exception(e)
            raise ValueError(e)

    def restart(self, waitIt=True):
        """Restart the container."""
        try:
            logging.info('Restarting container {}'.format(self.data.get('name')))
            container = self.client.instances.get(self.data.get('name'))
            container.restart(wait=waitIt)
        except Exception as e:
            logging.error('Failed to restart container {}'.format(self.data.get('name')))
            logging.exception(e)
            raise ValueError(e)

    def clone(self):
        """Copy this container to ``newContainer`` and start both copies.

        NOTE(review): the source container is started afterwards even if it
        was stopped beforehand — confirm this is intended.
        """
        try:
            logging.info('Cloning container {}'.format(self.data.get('name')))
            container = self.client.instances.get(self.data.get('name'))
            # LXD cannot copy a running container; stop it first.
            if container.status == 'Running':
                container.stop(wait=True)
            copyData = container.generate_migration_data()
            copyData['source'] = {'type': 'copy', 'source': self.data.get('name')}
            copyData['name'] = self.data.get('newContainer')
            newContainer = self.client.instances.create(copyData, wait=True)
            container.start(wait=True)
            newContainer.start(wait=True)
            return self.client.api.instances[self.data.get('newContainer')].get().json()['metadata']
        except Exception as e:
            logging.error('Failed to clone container {}'.format(self.data.get('name')))
            logging.exception(e)
            raise ValueError(e)

    def move(self):
        """Copy this container to ``newContainer``, start it and delete the original."""
        try:
            logging.info('Moving container {}'.format(self.data.get('name')))
            container = self.client.instances.get(self.data.get('name'))
            if container.status == 'Running':
                container.stop(wait=True)
            copyData = container.generate_migration_data()
            copyData['source'] = {'type': 'copy', 'source': self.data.get('name')}
            copyData['name'] = self.data.get('newContainer')
            newContainer = self.client.instances.create(copyData, wait=True)
            newContainer.start(wait=True)
            container.delete(wait=True)
            return self.client.api.instances[self.data.get('newContainer')].get().json()['metadata']
        except Exception as e:
            logging.error('Failed to move container {}'.format(self.data.get('name')))
            logging.exception(e)
            raise ValueError(e)

    def export(self, force=False):
        """Publish the container as an image tagged with ``imageAlias``."""
        try:
            logging.info('Exporting container {}'.format(self.data.get('name')))
            container = self.client.instances.get(self.data.get('name'))
            if force and container.status == 'Running':
                container.stop(wait=True)
            image = container.publish(wait=True)
            image.add_alias(self.data.get('imageAlias'), self.data.get('name'))
            try:
                # Best-effort: copy the properties of the base image onto the
                # newly published one.
                fingerprint = container.config.get('volatile.base_image')
                self.client.api.images[image.fingerprint].put(json={'properties': self.client.api.images[fingerprint].get().json()['metadata']['properties']})
            except Exception:
                # Narrowed from a bare except; base image may be gone.
                logging.error('Image does not exist.')
            container.start(wait=True)
            return self.client.api.images[image.fingerprint].get().json()['metadata']
        except Exception as e:
            logging.error('Failed to export container {}'.format(self.data.get('name')))
            logging.exception(e)
            raise ValueError(e)

    def rename(self, force=True):
        """Rename the container to ``newName``, stopping/restarting it if needed."""
        try:
            logging.info('Renaming container {} to {}'.format(self.data.get('name'), self.data.get('newName')))
            if self.data.get('newName'):
                if self.containerExists(self.data.get('newName')):
                    raise ValueError('Container with that name already exists')
                container = self.client.instances.get(self.data.get('name'))
                previousState = container.status
                if previousState == 'Running':
                    if force == False:
                        raise ValueError('Container is running')
                    container.stop(wait=True)
                container.rename(self.data.get('newName'), True)
                if previousState == 'Running':
                    container.start(wait=True)
                self.data['name'] = self.data.get('newName')
            return self.info()
        except Exception as e:
            logging.error('Failed to rename container {}'.format(self.data.get('name')))
            logging.exception(e)
            raise ValueError(e)

    def freeze(self, waitIt=True):
        """Freeze (pause) the container."""
        try:
            logging.info('Freezing container {}'.format(self.data.get('name')))
            container = self.client.instances.get(self.data.get('name'))
            container.freeze(wait=waitIt)
        except Exception as e:
            logging.error('Failed to freeze container {}'.format(self.data.get('name')))
            logging.exception(e)
            raise ValueError(e)

    def unfreeze(self, waitIt=True):
        """Unfreeze (resume) the container."""
        try:
            logging.info('Unfreezing container {}'.format(self.data.get('name')))
            container = self.client.instances.get(self.data.get('name'))
            container.unfreeze(wait=waitIt)
        except Exception as e:
            logging.error('Failed to unfreeze container {}'.format(self.data.get('name')))
            logging.exception(e)
            raise ValueError(e)

    def initNetwork(self):
        """Ensure ``self.data['devices']`` exists before writing device entries."""
        if not self.data.get('devices', None):
            self.data['devices'] = {}

    def addNetwork(self, network):
        """Attach a network device (keyed by its 'name') and save the container."""
        self.initNetwork()
        self.data['devices'][network['name']] = network
        try:
            container = self.client.instances.get(self.data['name'])
            container.devices = self.data['devices']
            container.save()
            return self.info()
        except Exception as e:
            raise ValueError(e)

    def removeNetwork(self, networkName):
        """Detach the named network device and save the container."""
        self.initNetwork()
        del self.data['devices'][networkName]
        try:
            container = self.client.instances.get(self.data['name'])
            container.devices = self.data['devices']
            container.save()
            return self.info()
        except Exception as e:
            raise ValueError(e)

    def addProxy(self, name, proxy):
        """Attach a proxy device under *name* and save the container."""
        self.initNetwork()
        self.data['devices'][name] = proxy
        try:
            container = self.client.instances.get(self.data['name'])
            container.devices = self.data['devices']
            container.save()
            return self.info()
        except Exception as e:
            raise ValueError(e)

    def removeProxy(self, name):
        """Detach the proxy device under *name* and save the container."""
        self.initNetwork()
        del self.data['devices'][name]
        try:
            container = self.client.instances.get(self.data['name'])
            container.devices = self.data['devices']
            container.save()
            return self.info()
        except Exception as e:
            raise ValueError(e)
| |
__all__ = ['XunleiClient']
import urllib
import urllib2
import cookielib
import re
import time
import os.path
import json
from ast import literal_eval
def retry(f):
    """Decorator: retry *f* on any Exception, sleeping between attempts.

    Sleep schedule: 1, 2, 3, 5, 10, 20, 30, 60 seconds, then 60 s for up to
    60 further tries.  The final failure is re-raised to the caller.
    """
    import functools
    #retry_sleeps = [1, 1, 1]
    retry_sleeps = [1, 2, 3, 5, 10, 20, 30, 60] + [60] * 60
    @functools.wraps(f)
    def withretry(*args, **kwargs):
        last = len(retry_sleeps) - 1
        for attempt, second in enumerate(retry_sleeps):
            try:
                return f(*args, **kwargs)
            except Exception:
                # Narrowed from a bare except so KeyboardInterrupt/SystemExit
                # are no longer swallowed and retried.
                import traceback
                import sys
                sys.stdout.write("Exception in user code:\n")
                traceback.print_exc(file=sys.stdout)
                if attempt == last:
                    # Re-raise immediately instead of sleeping one more time
                    # after the final failed attempt.
                    raise
                time.sleep(second)
    return withretry
class Logger:
def stdout(self, message):
print message
def info(self, message):
print message
def debug(self, message):
pass
def trace(self, message):
pass
logger = Logger()
class XunleiClient:
    # Number of tasks requested per page on the normal task list.
    page_size = 100
    # Large enough to fetch all sub-files of a BT task in one request.
    bt_page_size = 9999
    def __init__(self, username=None, password=None, cookie_path=None, login=True):
        """Create a client and, when *login* is True, ensure a valid session.

        cookie_path: file for persisting cookies between runs (LWP format);
        when None, cookies live in memory only.
        """
        self.username = username
        self.password = password
        self.cookie_path = cookie_path
        if cookie_path:
            self.cookiejar = cookielib.LWPCookieJar()
            if os.path.exists(cookie_path):
                self.load_cookies()
        else:
            self.cookiejar = cookielib.CookieJar()
        # Page size is carried in a cookie, so it must be set before requests.
        self.set_page_size(self.page_size)
        self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookiejar))
        if login:
            if not self.has_logged_in():
                self.login()
            else:
                # Session restored from cookies; just cache the user id.
                self.id = self.get_userid()
    @retry
    def urlopen(self, url, **args):
        """Open *url* with the cookie-aware opener; retried on any failure.

        A dict 'data' kwarg is form-encoded automatically (urlencode is a
        helper defined elsewhere in this project — TODO confirm).
        """
        logger.debug(url)
        # import traceback
        # for line in traceback.format_stack():
        #     print line.strip()
        if 'data' in args and type(args['data']) == dict:
            args['data'] = urlencode(args['data'])
        return self.opener.open(urllib2.Request(url, **args), timeout=60)
    def urlread1(self, url, **args):
        """Fetch *url* and return the raw body, decompressing gzip/deflate."""
        args.setdefault('headers', {})
        headers = args['headers']
        headers.setdefault('Accept-Encoding', 'gzip, deflate')
        # headers.setdefault('Referer', 'http://lixian.vip.xunlei.com/task.html')
        # headers.setdefault('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:11.0) Gecko/20100101 Firefox/11.0')
        # headers.setdefault('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')
        # headers.setdefault('Accept-Language', 'zh-cn,zh;q=0.7,en-us;q=0.3')
        response = self.urlopen(url, **args)
        data = response.read()
        # ungzip/undeflate are helpers defined elsewhere in this project.
        if response.info().get('Content-Encoding') == 'gzip':
            data = ungzip(data)
        elif response.info().get('Content-Encoding') == 'deflate':
            data = undeflate(data)
        return data
def urlread(self, url, **args):
data = self.urlread1(url, **args)
if self.is_session_timeout(data):
logger.debug('session timed out')
self.login()
data = self.urlread1(url, **args)
return data
    def load_cookies(self):
        # Restore cookies saved by a previous run; expiry is ignored so a
        # stale session can still be probed (and refreshed by login()).
        self.cookiejar.load(self.cookie_path, ignore_discard=True, ignore_expires=True)
    def save_cookies(self):
        # Persist cookies only when a cookie file was configured.
        if self.cookie_path:
            self.cookiejar.save(self.cookie_path, ignore_discard=True)
    def get_cookie(self, domain, k):
        # Value of cookie *k* for *domain*, or None if absent.
        if self.has_cookie(domain, k):
            return self.cookiejar._cookies[domain]['/'][k].value
    def has_cookie(self, domain, k):
        # Reaches into cookielib internals; all cookies here use path '/'.
        return domain in self.cookiejar._cookies and k in self.cookiejar._cookies[domain]['/']
    def get_userid(self):
        # The 'userid' cookie only exists after a successful login.
        if self.has_cookie('.xunlei.com', 'userid'):
            return self.get_cookie('.xunlei.com', 'userid')
        else:
            raise Exception('Probably login failed')
    def get_userid_or_none(self):
        return self.get_cookie('.xunlei.com', 'userid')
    def get_username(self):
        return self.get_cookie('.xunlei.com', 'usernewno')
    def get_gdriveid(self):
        return self.get_cookie('.vip.xunlei.com', 'gdriveid')
    def has_gdriveid(self):
        return self.has_cookie('.vip.xunlei.com', 'gdriveid')
    def get_referer(self):
        return 'http://dynamic.cloud.vip.xunlei.com/user_task?userid=%s' % self.id
    def set_cookie(self, domain, k, v):
        # Session-only cookie (discard=True); server-set cookies replace it.
        c = cookielib.Cookie(version=0, name=k, value=v, port=None, port_specified=False, domain=domain, domain_specified=True, domain_initial_dot=False, path='/', path_specified=True, secure=False, expires=None, discard=True, comment=None, comment_url=None, rest={}, rfc2109=False)
        self.cookiejar.set_cookie(c)
    def del_cookie(self, domain, k):
        if self.has_cookie(domain, k):
            self.cookiejar.clear(domain=domain, path="/", name=k)
    def set_gdriveid(self, id):
        self.set_cookie('.vip.xunlei.com', 'gdriveid', id)
    def set_page_size(self, n):
        # The server reads the page size from the 'pagenum' cookie.
        self.set_cookie('.vip.xunlei.com', 'pagenum', str(n))
    def get_cookie_header(self):
        # Build a raw "Cookie:" header value covering both xunlei domains.
        def domain_header(domain):
            root = self.cookiejar._cookies[domain]['/']
            return '; '.join(k+'='+root[k].value for k in root)
        return domain_header('.xunlei.com') + '; ' + domain_header('.vip.xunlei.com')
    def is_login_ok(self, html):
        # Heuristic: a logged-in task page is large; error stubs are tiny.
        return len(html) > 512
    def has_logged_in(self):
        """Probe the task page to check whether the cached session is valid."""
        id = self.get_userid_or_none()
        if not id:
            return False
        #print self.urlopen('http://dynamic.cloud.vip.xunlei.com/user_task?userid=%s&st=0' % id).read().decode('utf-8')
        # Use the smallest page size for the probe, then restore it.
        self.set_page_size(1)
        url = 'http://dynamic.cloud.vip.xunlei.com/user_task?userid=%s&st=0' % id
        #url = 'http://dynamic.lixian.vip.xunlei.com/login?cachetime=%d' % current_timestamp()
        r = self.is_login_ok(self.urlread(url))
        self.set_page_size(self.page_size)
        return r
    def is_session_timeout(self, html):
        # The server answers with one of two exact cookie-clearing scripts
        # when the session has expired; compare against them verbatim.
        is_timeout = html == '''<script>document.cookie ="sessionid=; path=/; domain=xunlei.com"; document.cookie ="lx_sessionid=; path=/; domain=vip.xunlei.com";top.location='http://cloud.vip.xunlei.com/task.html?error=1'</script>''' or html == '''<script>document.cookie ="sessionid=; path=/; domain=xunlei.com"; document.cookie ="lsessionid=; path=/; domain=xunlei.com"; document.cookie ="lx_sessionid=; path=/; domain=vip.xunlei.com";top.location='http://cloud.vip.xunlei.com/task.html?error=2'</script>'''
        if is_timeout:
            logger.trace(html)
        return is_timeout
def login(self, username=None, password=None):
username = self.username
password = self.password
if not username and self.has_cookie('.xunlei.com', 'usernewno'):
username = self.get_username()
if not username:
# TODO: don't depend on lixian_config
import lixian_config
username = lixian_config.get_config('username')
# if not username:
# raise NotImplementedError('user is not logged in')
if not password:
raise NotImplementedError('user is not logged in')
logger.debug('login')
cachetime = current_timestamp()
check_url = 'http://login.xunlei.com/check?u=%s&cachetime=%d' % (username, cachetime)
login_page = self.urlopen(check_url).read()
verifycode = self.get_cookie('.xunlei.com', 'check_result')[2:].upper()
assert verifycode
password = encypt_password(password)
password = md5(password+verifycode)
login_page = self.urlopen('http://login.xunlei.com/sec2login/', data={'u': username, 'p': password, 'verifycode': verifycode})
self.id = self.get_userid()
self.set_page_size(1)
login_page = self.urlopen('http://dynamic.lixian.vip.xunlei.com/login?cachetime=%d&from=0'%current_timestamp()).read()
self.set_page_size(self.page_size)
if not self.is_login_ok(login_page):
logger.trace(login_page)
raise RuntimeError('login failed')
self.save_cookies()
    def logout(self):
        """Clear all session cookies locally (no server-side unregister call)."""
        logger.debug('logout')
        #session_id = self.get_cookie('.xunlei.com', 'sessionid')
        #timestamp = current_timestamp()
        #url = 'http://login.xunlei.com/unregister?sessionid=%s&cachetime=%s&noCacheIE=%s' % (session_id, timestamp, timestamp)
        #self.urlopen(url).read()
        #self.urlopen('http://dynamic.vip.xunlei.com/login/indexlogin_contr/logout/').read()
        # Blank out every session-related cookie on both domains.
        ckeys = ["vip_isvip","lx_sessionid","vip_level","lx_login","dl_enable","in_xl","ucid","lixian_section"]
        ckeys1 = ["sessionid","usrname","nickname","usernewno","userid"]
        self.del_cookie('.vip.xunlei.com', 'gdriveid')
        for k in ckeys:
            self.set_cookie('.vip.xunlei.com', k, '')
        for k in ckeys1:
            self.set_cookie('.xunlei.com', k, '')
        self.save_cookies()
    def read_task_page_url(self, url):
        """Fetch one task-list page; return (tasks, next_page_url_or_None)."""
        page = self.urlread(url).decode('utf-8', 'ignore')
        data = parse_json_response(page)
        if not self.has_gdriveid():
            # The first task page carries the gdriveid needed for downloads.
            gdriveid = data['info']['user']['cookie']
            self.set_gdriveid(gdriveid)
            self.save_cookies()
        # tasks = parse_json_tasks(data)
        # Expired tasks are filtered out of the normal listing.
        tasks = [t for t in parse_json_tasks(data) if not t['expired']]
        for t in tasks:
            t['client'] = self
        current_page = int(re.search(r'page=(\d+)', url).group(1))
        total_tasks = int(data['info']['total_num'])
        # Python 2 integer division; the remainder is handled just below.
        total_pages = total_tasks / self.page_size
        if total_tasks % self.page_size != 0:
            total_pages += 1
        if total_pages == 0:
            total_pages = 1
        # Sanity check against the pager markup the server rendered.
        assert total_pages >= data['global_new']['page'].count('<li><a')
        if current_page < total_pages:
            next = re.sub(r'page=(\d+)', 'page=%d' % (current_page + 1), url)
        else:
            next = None
        return tasks, next
    def read_task_page(self, type_id, page=1):
        """Fetch page *page* of the task list for the given type filter."""
        # type_id: 1 for downloading, 2 for completed, 4 for downloading+completed+expired, 11 for deleted, 13 for expired
        if type_id == 0:
            # 0 is this client's shorthand for "everything" (server code 4).
            type_id = 4
        page_size = self.page_size
        p = 1 # XXX: what is it?
        # jsonp = 'jsonp%s' % current_timestamp()
        # url = 'http://dynamic.cloud.vip.xunlei.com/interface/showtask_unfresh?type_id=%s&page=%s&tasknum=%s&p=%s&interfrom=task&callback=%s' % (type_id, page, page_size, p, jsonp)
        url = 'http://dynamic.cloud.vip.xunlei.com/interface/showtask_unfresh?type_id=%s&page=%s&tasknum=%s&p=%s&interfrom=task' % (type_id, page, page_size, p)
        return self.read_task_page_url(url)
def read_tasks(self, type_id=0):
'''read one page'''
tasks = self.read_task_page(type_id)[0]
for i, task in enumerate(tasks):
task['#'] = i
return tasks
def read_all_tasks(self, type_id=0):
'''read all pages'''
all_tasks = []
tasks, next_link = self.read_task_page(type_id)
all_tasks.extend(tasks)
while next_link:
tasks, next_link = self.read_task_page_url(next_link)
all_tasks.extend(tasks)
for i, task in enumerate(all_tasks):
task['#'] = i
return all_tasks
    def read_completed(self):
        '''read first page of completed tasks'''
        # type_id 2 == completed (see read_task_page).
        return self.read_tasks(2)
    def read_all_completed(self):
        '''read all pages of completed tasks'''
        return self.read_all_tasks(2)
    def read_categories(self):
        """Return a mapping of category name -> numeric category id."""
        # url = 'http://dynamic.cloud.vip.xunlei.com/interface/menu_get?callback=jsonp%s&interfrom=task' % current_timestamp()
        url = 'http://dynamic.cloud.vip.xunlei.com/interface/menu_get'
        html = self.urlread(url)
        # Response is a JSONP-style 'rebuild({...})' payload.
        m = re.match(r'rebuild\((\{.*\})\)', html)
        if not m:
            logger.trace(html)
            raise RuntimeError('Invalid response')
        result = json.loads(m.group(1))
        return dict((x['name'], int(x['id'])) for x in result['info'])
    def get_category_id(self, category):
        # KeyError if the category does not exist.
        return self.read_categories()[category]
    def read_all_tasks_by_category(self, category):
        """List every task in *category* (convert_task is an external helper)."""
        category_id = self.get_category_id(category)
        jsonp = 'jsonp%s' % current_timestamp()
        url = 'http://dynamic.cloud.vip.xunlei.com/interface/show_class?callback=%s&type_id=%d' % (jsonp, category_id)
        html = self.urlread(url)
        response = json.loads(re.match(r'^%s\((.+)\)$' % jsonp, html).group(1))
        assert response['rtcode'] == '0', response['rtcode']
        info = response['info']
        # Python 2 map() returns a list here.
        tasks = map(convert_task, info['tasks'])
        for i, task in enumerate(tasks):
            task['client'] = self
            task['#'] = i
        return tasks
def read_history_page_url(self, url):
self.set_cookie('.vip.xunlei.com', 'lx_nf_all', urllib.quote('page_check_all=history&fltask_all_guoqi=1&class_check=0&page_check=task&fl_page_id=0&class_check_new=0&set_tab_status=11'))
page = self.urlread(url).decode('utf-8', 'ignore')
if not self.has_gdriveid():
gdriveid = re.search(r'id="cok" value="([^"]+)"', page).group(1)
self.set_gdriveid(gdriveid)
self.save_cookies()
tasks = parse_history(page)
for t in tasks:
t['client'] = self
pginfo = re.search(r'<div class="pginfo">.*?</div>', page)
match_next_page = re.search(r'<li class="next"><a href="([^"]+)">[^<>]*</a></li>', page)
return tasks, match_next_page and 'http://dynamic.cloud.vip.xunlei.com'+match_next_page.group(1)
    def read_history_page(self, type=0, pg=None):
        """Fetch a history page; type 0 = deleted, 1 = expired; pg None = first page."""
        if pg is None:
            url = 'http://dynamic.cloud.vip.xunlei.com/user_history?userid=%s&type=%d' % (self.id, type)
        else:
            url = 'http://dynamic.cloud.vip.xunlei.com/user_history?userid=%s&p=%d&type=%d' % (self.id, pg, type)
        return self.read_history_page_url(url)
def read_history(self, type=0):
'''read one page'''
tasks = self.read_history_page(type)[0]
for i, task in enumerate(tasks):
task['#'] = i
return tasks
def read_all_history(self, type=0):
'''read all pages of deleted/expired tasks'''
all_tasks = []
tasks, next_link = self.read_history_page(type)
all_tasks.extend(tasks)
while next_link:
tasks, next_link = self.read_history_page_url(next_link)
all_tasks.extend(tasks)
for i, task in enumerate(all_tasks):
task['#'] = i
return all_tasks
    def read_deleted(self):
        # History type 0 = deleted tasks.
        return self.read_history()
    def read_all_deleted(self):
        return self.read_all_history()
    def read_expired(self):
        # History type 1 = expired tasks.
        return self.read_history(1)
    def read_all_expired(self):
        return self.read_all_history(1)
    def list_bt(self, task):
        """List the sub-tasks (files) of a BT task."""
        assert task['type'] == 'bt'
        url = 'http://dynamic.cloud.vip.xunlei.com/interface/fill_bt_list?callback=fill_bt_list&tid=%s&infoid=%s&g_net=1&p=1&uid=%s&noCacheIE=%s' % (task['id'], task['bt_hash'], self.id, current_timestamp())
        # Temporarily raise the page size so all files fit on one page.
        self.set_page_size(self.bt_page_size)
        # remove_bom / parse_bt_list are helpers defined elsewhere.
        html = remove_bom(self.urlread(url)).decode('utf-8')
        self.set_page_size(self.page_size)
        sub_tasks = parse_bt_list(html)
        for t in sub_tasks:
            # Sub-tasks inherit the parent task's date.
            t['date'] = task['date']
        return sub_tasks
    def get_torrent_file_by_info_hash(self, info_hash):
        """Download the .torrent bytes stored on the cloud for *info_hash*."""
        url = 'http://dynamic.cloud.vip.xunlei.com/interface/get_torrent?userid=%s&infoid=%s' % (self.id, info_hash.upper())
        response = self.urlopen(url)
        torrent = response.read()
        # The server answers a "torrent not found" alert page (UTF-8 Chinese
        # text, hex-escaped below) instead of an HTTP error.
        if torrent == "<meta http-equiv='Content-Type' content='text/html; charset=utf-8' /><script>alert('\xe5\xaf\xb9\xe4\xb8\x8d\xe8\xb5\xb7\xef\xbc\x8c\xe6\xb2\xa1\xe6\x9c\x89\xe6\x89\xbe\xe5\x88\xb0\xe5\xaf\xb9\xe5\xba\x94\xe7\x9a\x84\xe7\xa7\x8d\xe5\xad\x90\xe6\x96\x87\xe4\xbb\xb6!');</script>":
            raise Exception('Torrent file not found on xunlei cloud: '+info_hash)
        assert response.headers['content-type'] == 'application/octet-stream'
        return torrent
    def get_torrent_file(self, task):
        return self.get_torrent_file_by_info_hash(task['bt_hash'])
    def add_task(self, url):
        """Submit a single download task; dispatches on the URL protocol."""
        protocol = parse_url_protocol(url)
        assert protocol in ('ed2k', 'http', 'ftp', 'thunder', 'Flashget', 'qqdl', 'bt', 'magnet'), 'protocol "%s" is not suppoted' % protocol
        # Unwrap thunder://, Flashget:// and qqdl:// wrappers first.
        from lixian_url import url_unmask
        url = url_unmask(url)
        protocol = parse_url_protocol(url)
        assert protocol in ('ed2k', 'http', 'ftp', 'bt', 'magnet'), 'protocol "%s" is not suppoted' % protocol
        if protocol == 'bt':
            return self.add_torrent_task_by_info_hash(url[5:])
        elif protocol == 'magnet':
            return self.add_magnet_task(url)
        random = current_random()
        check_url = 'http://dynamic.cloud.vip.xunlei.com/interface/task_check?callback=queryCid&url=%s&random=%s&tcache=%s' % (urllib.quote(url), random, current_timestamp())
        js = self.urlread(check_url).decode('utf-8')
        qcid = re.match(r'^queryCid(\(.+\))\s*$', js).group(1)
        qcid = literal_eval(qcid)
        # The server has returned 8-, 9- and 10-element tuples over time.
        if len(qcid) == 8:
            cid, gcid, size_required, filename, goldbean_need, silverbean_need, is_full, random = qcid
        elif len(qcid) == 9:
            cid, gcid, size_required, filename, goldbean_need, silverbean_need, is_full, random, ext = qcid
        elif len(qcid) == 10:
            cid, gcid, size_required, some_key, filename, goldbean_need, silverbean_need, is_full, random, ext = qcid
        else:
            raise NotImplementedError(qcid)
        assert goldbean_need == 0
        assert silverbean_need == 0
        if url.startswith('http://') or url.startswith('ftp://'):
            task_type = 0
        elif url.startswith('ed2k://'):
            task_type = 2
        else:
            raise NotImplementedError()
        task_url = 'http://dynamic.cloud.vip.xunlei.com/interface/task_commit?'+urlencode(
           {'callback': 'ret_task',
            'uid': self.id,
            'cid': cid,
            'gcid': gcid,
            'size': size_required,
            'goldbean': goldbean_need,
            'silverbean': silverbean_need,
            't': filename,
            'url': url,
            'type': task_type,
            'o_page': 'task',
            'o_taskid': '0',
            })
        response = self.urlread(task_url)
        assert response == 'ret_task(Array)', response
    def add_batch_tasks(self, urls, old_task_ids=None):
        """Submit several URL tasks in one request.

        old_task_ids: ids of tasks being re-added (history restart),
        aligned with *urls*.
        """
        assert urls
        urls = list(urls)
        for url in urls:
            if parse_url_protocol(url) not in ('http', 'ftp', 'ed2k', 'bt', 'thunder', 'magnet'):
                raise NotImplementedError('Unsupported: '+url)
        # Only plain link types are batched; bt/magnet go through other APIs.
        # (Python 2 filter() returns a list here.)
        urls = filter(lambda u: parse_url_protocol(u) in ('http', 'ftp', 'ed2k', 'thunder'), urls)
        if not urls:
            return
        #self.urlopen('http://dynamic.cloud.vip.xunlei.com/interface/batch_task_check', data={'url':'\r\n'.join(urls), 'random':current_random()})
        jsonp = 'jsonp%s' % current_timestamp()
        url = 'http://dynamic.cloud.vip.xunlei.com/interface/batch_task_commit?callback=%s' % jsonp
        if old_task_ids:
            batch_old_taskid = ','.join(old_task_ids)
        else:
            batch_old_taskid = '0' + ',' * (len(urls) - 1) # XXX: what is it?
        data = {}
        for i in range(len(urls)):
            data['cid[%d]' % i] = ''
            data['url[%d]' % i] = urllib.quote(to_utf_8(urls[i])) # fix per request #98
        data['batch_old_taskid'] = batch_old_taskid
        response = self.urlread(url, data=data)
        assert_response(response, jsonp, len(urls))
    def add_torrent_task_by_content(self, content, path='attachment.torrent'):
        """Upload raw .torrent bytes and commit a BT task; return the info hash."""
        # Bencoded torrents start with 'd<digits>:' (a dict).
        assert re.match(r'd\d+:', content), 'Probably not a valid content file [%s...]' % repr(content[:17])
        upload_url = 'http://dynamic.cloud.vip.xunlei.com/interface/torrent_upload'
        jsonp = 'jsonp%s' % current_timestamp()
        commit_url = 'http://dynamic.cloud.vip.xunlei.com/interface/bt_task_commit?callback=%s' % jsonp
        # encode_multipart_formdata is a helper defined elsewhere.
        content_type, body = encode_multipart_formdata([], [('filepath', path, content)])
        response = self.urlread(upload_url, data=body, headers={'Content-Type': content_type}).decode('utf-8')
        upload_success = re.search(r'<script>document\.domain="xunlei\.com";var btResult =(\{.*\});var btRtcode = 0</script>', response, flags=re.S)
        if upload_success:
            bt = json.loads(upload_success.group(1))
            bt_hash = bt['infoid']
            bt_name = bt['ftitle']
            bt_size = bt['btsize']
            data = {'uid':self.id, 'btname':bt_name, 'cid':bt_hash, 'tsize':bt_size,
                'findex':''.join(f['id']+'_' for f in bt['filelist']),
                'size':''.join(f['subsize']+'_' for f in bt['filelist']),
                'from':'0'}
            response = self.urlread(commit_url, data=data)
            #assert_response(response, jsonp)
            assert re.match(r'%s\({"id":"\d+","progress":1,"rtcode":1}\)' % jsonp, response), repr(response)
            return bt_hash
        # The torrent may already be known to the cloud; the server then
        # answers with an edit_bt_list callback instead.
        already_exists = re.search(r"parent\.edit_bt_list\((\{.*\}),''\)", response, flags=re.S)
        if already_exists:
            bt = json.loads(already_exists.group(1))
            bt_hash = bt['infoid']
            return bt_hash
        raise NotImplementedError()
    def add_torrent_task_by_info_hash(self, sha1):
        # Re-download the stored torrent, then go through the upload path.
        return self.add_torrent_task_by_content(self.get_torrent_file_by_info_hash(sha1), sha1.upper()+'.torrent')
    def add_torrent_task(self, path):
        # Submit a local .torrent file.
        with open(path, 'rb') as x:
            return self.add_torrent_task_by_content(x.read(), os.path.basename(path))
    def add_torrent_task_by_info_hash2(self, sha1, old_task_id=None):
        '''similar to add_torrent_task_by_info_hash, but faster. I may delete current add_torrent_task_by_info_hash completely in future'''
        link = 'http://dynamic.cloud.vip.xunlei.com/interface/get_torrent?userid=%s&infoid=%s' % (self.id, sha1)
        return self.add_torrent_task_by_link(link, old_task_id=old_task_id)
    def add_magnet_task(self, link):
        return self.add_torrent_task_by_link(link)
    def add_torrent_task_by_link(self, link, old_task_id=None):
        """Submit a BT task from a magnet/torrent link; return its cid (info hash)."""
        url = 'http://dynamic.cloud.vip.xunlei.com/interface/url_query?callback=queryUrl&u=%s&random=%s' % (urllib.quote(link), current_timestamp())
        response = self.urlread(url)
        success = re.search(r'queryUrl(\(1,.*\))\s*$', response, flags=re.S)
        if not success:
            # queryUrl(-1,'<40-char hash>'...) means the task already exists.
            already_exists = re.search(r"queryUrl\(-1,'([^']{40})", response, flags=re.S)
            if already_exists:
                return already_exists.group(1)
            raise NotImplementedError(repr(response))
        args = success.group(1).decode('utf-8')
        # The payload is a JS call using 'new Array'; strip that so the rest
        # parses as a Python literal.
        args = literal_eval(args.replace('new Array', ''))
        _, cid, tsize, btname, _, names, sizes_, sizes, _, types, findexes, timestamp, _ = args
        def toList(x):
            # Single-file torrents come back as scalars, not arrays.
            if type(x) in (list, tuple):
                return x
            else:
                return [x]
        data = {'uid':self.id, 'btname':btname, 'cid':cid, 'tsize':tsize,
            'findex':''.join(x+'_' for x in toList(findexes)),
            'size':''.join(x+'_' for x in toList(sizes)),
            'from':'0'}
        if old_task_id:
            # Re-adding a task from history.
            data['o_taskid'] = old_task_id
            data['o_page'] = 'history'
        jsonp = 'jsonp%s' % current_timestamp()
        commit_url = 'http://dynamic.cloud.vip.xunlei.com/interface/bt_task_commit?callback=%s' % jsonp
        response = self.urlread(commit_url, data=data)
        #assert_response(response, jsonp)
        assert re.match(r'%s\({"id":"\d+","progress":1,"rtcode":1}\)' % jsonp, response), repr(response)
        return cid
def readd_all_expired_tasks(self):
    '''Ask the server to extend (re-add) every expired task in one call.

    The callback value looks irrelevant to the endpoint (it is literally
    'anything'); the response body carries no useful information.
    '''
    url = 'http://dynamic.cloud.vip.xunlei.com/interface/delay_once?callback=anything'
    # Fix: the response was previously bound to an unused local; drop it.
    self.urlread(url)
def delete_tasks_by_id(self, ids):
    '''Delete the tasks identified by the id strings in *ids*.'''
    jsonp = 'jsonp%s' % current_timestamp()
    # trailing commas are part of the server's list format
    payload = {'taskids': ','.join(ids) + ',', 'databases': '0,'}
    url = ('http://dynamic.cloud.vip.xunlei.com/interface/task_delete'
           '?callback=%s&type=%s&noCacheIE=%s'
           % (jsonp, 2, current_timestamp()))  # XXX: what is 'type'?
    reply = remove_bom(self.urlread(url, data=payload))
    assert_response(reply, jsonp, '{"result":1,"type":2}')
def delete_task_by_id(self, id):
    '''Delete a single task given its id string.'''
    self.delete_tasks_by_id([id])
def delete_task(self, task):
    '''Delete a single task dict (as returned by the task listing).'''
    task_id = task['id']
    self.delete_task_by_id(task_id)
def delete_tasks(self, tasks):
    '''Delete several task dicts in one request.'''
    ids = [task['id'] for task in tasks]
    self.delete_tasks_by_id(ids)
def pause_tasks_by_id(self, ids):
    '''Pause the tasks identified by the id strings in *ids*.'''
    id_list = ','.join(ids) + ','
    url = ('http://dynamic.cloud.vip.xunlei.com/interface/task_pause'
           '?tid=%s&uid=%s&noCacheIE=%s' % (id_list, self.id, current_timestamp()))
    assert self.urlread(url) == 'pause_task_resp()'
def pause_task_by_id(self, id):
    '''Pause a single task given its id string.'''
    self.pause_tasks_by_id([id])
def pause_task(self, task):
    '''Pause a single task dict.'''
    task_id = task['id']
    self.pause_task_by_id(task_id)
def pause_tasks(self, tasks):
    '''Pause several task dicts in one request.'''
    ids = [task['id'] for task in tasks]
    self.pause_tasks_by_id(ids)
def restart_tasks(self, tasks):
        '''Re-activate (redownload) the given task dicts in a single request.'''
        jsonp = 'jsonp%s' % current_timestamp()
        url = 'http://dynamic.cloud.vip.xunlei.com/interface/redownload?callback=%s' % jsonp
        form = []
        for task in tasks:
            assert task['type'] in ('ed2k', 'http', 'ftp', 'https', 'bt'), "'%s' is not tested" % task['type']
            data = {'id[]': task['id'],
                    'cid[]': '', # XXX: should I set this?
                    'url[]': task['original_url'],
                    'download_status[]': task['status']}
            if task['type'] == 'ed2k':
                data['taskname[]'] = task['name'].encode('utf-8') # XXX: shouldn't I set this for other task types?
            form.append(urlencode(data))
        form.append(urlencode({'type':1}))
        # The server expects repeated 'name[]' fields, so the pre-encoded
        # pieces are joined by hand instead of one urlencode() call.
        data = '&'.join(form)
        response = self.urlread(url, data=data)
        assert_response(response, jsonp)
def rename_task(self, task, new_name):
        '''Rename *task* to *new_name* (must be a Python 2 unicode string).'''
        assert type(new_name) == unicode
        url = 'http://dynamic.cloud.vip.xunlei.com/interface/rename'
        taskid = task['id']
        # BT tasks go through the same endpoint but need the bt flag set
        bt = '1' if task['type'] == 'bt' else '0'
        url = url+'?'+urlencode({'taskid':taskid, 'bt':bt, 'filename':new_name.encode('utf-8')})
        response = self.urlread(url)
        assert '"result":0' in response, response
def restart_task(self, task):
    '''Re-activate a single task dict.'''
    self.restart_tasks([task])
def get_task_by_id(self, id):
    '''Return the task dict whose 'id' equals *id*; raise if none matches.'''
    for task in self.read_all_tasks(0):
        if task['id'] == id:
            return task
    raise Exception('No task found for id '+id)
def current_timestamp():
    '''Milliseconds since the Unix epoch, as an int.'''
    seconds = time.time()
    return int(seconds * 1000)
def current_random():
    '''A quasi-unique string: <ms-timestamp><6 random digits>.<large random int>.'''
    from random import randint
    stamp = current_timestamp()
    suffix = randint(100000000, 9999999999)
    return '%s%06d.%s' % (stamp, randint(0, 999999), suffix)
def convert_task(data):
    '''Normalize one raw JSON task record into the internal task dict.'''
    # flag '0' = active, '4' = expired; any other flag raises KeyError
    expired = {'0':False, '4': True}[data['flag']]
    task = {'id': data['id'],
            # scheme of the original URL doubles as the task type (http/ftp/bt/...)
            'type': re.match(r'[^:]+', data['url']).group().lower(),
            'name': unescape_html(data['taskname']),
            'status': int(data['download_status']),
            'status_text': {'0':'waiting', '1':'downloading', '2':'completed', '3':'failed', '5':'pending'}[data['download_status']],
            'expired': expired,
            'size': int(data['ysfilesize']),
            'original_url': unescape_html(data['url']),
            'xunlei_url': data['lixian_url'] or None,  # empty string -> None
            'bt_hash': data['cid'],
            'dcid': data['cid'],
            'gcid': data['gcid'],
            'date': data['dt_committed'][:10].replace('-', '.'),
            'progress': '%s%%' % data['progress'],
            'speed': '%s' % data['speed'],
           }
    return task
def parse_json_response(html):
    '''Parse a rebuild({...}) JSONP payload into a dict; raise on anything else.'''
    match = re.match(r'rebuild\((\{.*\})\)', html)
    if match:
        return json.loads(match.group(1))
    logger.trace(html)
    raise RuntimeError('Invalid response')
def parse_json_tasks(result):
    '''Convert the tasks list of a parsed JSON response into task dicts.'''
    tasks = result['info']['tasks']
    return map(convert_task, tasks)  # Python 2: map returns a list
def parse_task(html):
    '''Scrape one task's HTML fragment into the internal task dict.

    The page encodes each field as an <input id="<name><taskid>" value=...>;
    we strip the numeric suffix to recover the field names.
    '''
    inputs = re.findall(r'<input[^<>]+/>', html)
    def parse_attrs(html):
        # attribute -> value for both single- and double-quoted attributes
        return dict((k, v1 or v2) for k, v1, v2 in re.findall(r'''\b(\w+)=(?:'([^']*)'|"([^"]*)")''', html))
    info = dict((x['id'], unescape_html(x['value'])) for x in map(parse_attrs, inputs))
    mini_info = {}
    mini_map = {}
    #mini_info = dict((re.sub(r'\d+$', '', k), info[k]) for k in info)
    for k in info:
        # 'taskname12345' -> 'taskname'; keep the original key for the id
        mini_key = re.sub(r'\d+$', '', k)
        mini_info[mini_key] = info[k]
        mini_map[mini_key] = k
    # the task id is the suffix that was stripped from 'taskname<id>'
    taskid = mini_map['taskname'][8:]
    url = mini_info['f_url']
    task_type = re.match(r'[^:]+', url).group().lower()
    task = {'id': taskid,
            'type': task_type,
            'name': mini_info['taskname'],
            'status': int(mini_info['d_status']),
            'status_text': {'0':'waiting', '1':'downloading', '2':'completed', '3':'failed', '5':'pending'}[mini_info['d_status']],
            'size': int(mini_info.get('ysfilesize', 0)),
            'original_url': mini_info['f_url'],
            'xunlei_url': mini_info.get('dl_url', None),
            'bt_hash': mini_info['dcid'],
            'dcid': mini_info['dcid'],
            'gcid': parse_gcid(mini_info.get('dl_url', None)),
           }
    # progress/speed/date live in display markup, not inputs; default to ''
    m = re.search(r'<em class="loadnum"[^<>]*>([^<>]*)</em>', html)
    task['progress'] = m and m.group(1) or ''
    m = re.search(r'<em [^<>]*id="speed\d+">([^<>]*)</em>', html)
    task['speed'] = m and m.group(1).replace(' ', '') or ''
    m = re.search(r'<span class="c_addtime">([^<>]*)</span>', html)
    task['date'] = m and m.group(1) or ''
    return task
def parse_history(html):
    '''Scrape the expired/history page into a list of task dicts.'''
    # isolate the task list container, then split it into per-task fragments
    rwbox = re.search(r'<div class="rwbox" id="rowbox_list".*?<!--rwbox-->', html, re.S).group()
    rw_lists = re.findall(r'<div class="rw_list".*?<input id="d_tasktype\d+"[^<>]*/>', rwbox, re.S)
    return map(parse_task, rw_lists)
def parse_bt_list(js):
    '''Parse a fill_bt_list(...) JSONP payload into a list of per-file dicts.'''
    result = json.loads(re.match(r'^fill_bt_list\((.+)\)\s*$', js).group(1))['Result']
    files = []
    for record in result['Record']:
        files.append({
            'id': record['taskid'],
            'index': record['id'],  # position of the file inside the torrent
            'type': 'bt',
            'name': record['title'], # TODO: support folder
            'status': int(record['download_status']),
            'status_text': {'0':'waiting', '1':'downloading', '2':'completed', '3':'failed', '5':'pending'}[record['download_status']],
            'size': int(record['filesize']),
            'original_url': record['url'],
            'xunlei_url': record['downurl'],
            'dcid': record['cid'],
            'gcid': parse_gcid(record['downurl']),
            # per-file speed/date are not provided by this endpoint
            'speed': '',
            'progress': '%s%%' % record['percent'],
            'date': '',
            })
    return files
def parse_gcid(url):
    '''Extract the 40-hex-digit gcid from a lixian download URL, or None.'''
    if not url:
        return None
    match = re.search(r'&g=([A-F0-9]{40})&', url)
    return match.group(1) if match else None
def urlencode(x):
    '''urllib.urlencode over a dict, UTF-8-encoding unicode keys/values (Python 2).'''
    def unif8(u):
        if type(u) == unicode:
            u = u.encode('utf-8')
        return u
    return urllib.urlencode([(unif8(k), unif8(v)) for k, v in x.items()])
def encode_multipart_formdata(fields, files):
    #http://code.activestate.com/recipes/146306/
    """
    Build a multipart/form-data request body.

    fields is a sequence of (name, value) elements for regular form fields.
    files is a sequence of (name, filename, value) elements for data to be uploaded as files
    Return (content_type, body) ready for httplib.HTTP instance
    """
    BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
    CRLF = '\r\n'
    parts = []
    for name, value in fields:
        parts.extend(['--' + BOUNDARY,
                      'Content-Disposition: form-data; name="%s"' % name,
                      '',
                      value])
    for name, filename, value in files:
        parts.extend(['--' + BOUNDARY,
                      'Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename),
                      'Content-Type: %s' % get_content_type(filename),
                      '',
                      value])
    parts.append('--' + BOUNDARY + '--')
    parts.append('')
    body = CRLF.join(parts)
    content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
    return content_type, body
def get_content_type(filename):
    '''Guess a MIME type from *filename*, defaulting to octet-stream.'''
    import mimetypes
    guessed = mimetypes.guess_type(filename)[0]
    return guessed or 'application/octet-stream'
def assert_default_page(response, id):
    '''Assert *response* is the redirect script back to user *id*'s task page.'''
    #assert response == "<script>top.location='http://dynamic.cloud.vip.xunlei.com/user_task?userid=%s&st=0'</script>" % id
    pattern = (r"^<script>top\.location='http://dynamic\.cloud\.vip\.xunlei\.com"
               r"/user_task\?userid=%s&st=0(&cache=\d+)?'</script>$" % id)
    assert re.match(pattern, response), response
def remove_bom(response):
    '''Strip a leading UTF-8 byte-order mark, if present.'''
    bom = '\xef\xbb\xbf'
    if response.startswith(bom):
        return response[len(bom):]
    return response
def assert_response(response, jsonp, value=1):
    '''Assert *response* is exactly the JSONP call '<jsonp>(<value>)'.'''
    cleaned = remove_bom(response)
    expected = '%s(%s)' % (jsonp, value)
    assert cleaned == expected, repr(cleaned)
def parse_url_protocol(url):
    '''Return the scheme of *url* ('magnet' for magnet links, the raw string otherwise).'''
    match = re.match(r'([^:]+)://', url)
    if match:
        return match.group(1)
    if url.startswith('magnet:'):
        return 'magnet'
    return url
def unescape_html(html):
    '''Unescape the basic XML entities (&amp; &lt; &gt;) in *html*.'''
    from xml.sax.saxutils import unescape
    return unescape(html)
def to_utf_8(s):
    # Python 2: encode a unicode string to UTF-8 bytes; pass byte strings through.
    if type(s) == unicode:
        return s.encode('utf-8')
    else:
        return s
def md5(s):
    '''Return the lower-case hexadecimal MD5 digest of *s*.'''
    import hashlib
    digest = hashlib.md5(s)
    return digest.hexdigest().lower()
def encypt_password(password):
    '''Return the double-MD5 of *password*; an already-hashed value
    (32 lower-case hex chars) is passed through unchanged.'''
    if re.match(r'^[0-9a-f]{32}$', password):
        return password
    return md5(md5(password))
def ungzip(s):
    '''Decompress a gzip-compressed byte string (Python 2: StringIO module).'''
    from StringIO import StringIO
    import gzip
    buffer = StringIO(s)
    f = gzip.GzipFile(fileobj=buffer)
    return f.read()
def undeflate(s):
    '''Inflate a raw (headerless) DEFLATE byte string.'''
    import zlib
    raw_window = -zlib.MAX_WBITS  # negative wbits -> no zlib header/trailer
    return zlib.decompress(s, raw_window)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Storage backend for SWIFT"""
from __future__ import absolute_import
import hashlib
import httplib
import math
import urllib
import urlparse
from oslo.config import cfg
from glance.common import auth
from glance.common import exception
import glance.openstack.common.log as logging
import glance.store
import glance.store.base
import glance.store.location
try:
import swiftclient
except ImportError:
pass
LOG = logging.getLogger(__name__)
DEFAULT_CONTAINER = 'glance'
DEFAULT_LARGE_OBJECT_SIZE = 5 * 1024 # 5GB
DEFAULT_LARGE_OBJECT_CHUNK_SIZE = 200 # 200M
# NOTE: glance's historical "MB" here is 1000 * 1024 bytes, not 2**20.
ONE_MB = 1000 * 1024
# Configuration options understood by the Swift store; registered on the
# global CONF object below.
swift_opts = [
    cfg.BoolOpt('swift_enable_snet', default=False,
                help=_('Whether to use ServiceNET to communicate with the '
                       'Swift storage servers.')),
    cfg.StrOpt('swift_store_auth_address',
               help=_('The address where the Swift authentication service '
                      'is listening.')),
    cfg.StrOpt('swift_store_user', secret=True,
               help=_('The user to authenticate against the Swift '
                      'authentication service')),
    cfg.StrOpt('swift_store_key', secret=True,
               help=_('Auth key for the user authenticating against the '
                      'Swift authentication service.')),
    cfg.StrOpt('swift_store_auth_version', default='2',
               help=_('Version of the authentication service to use. '
                      'Valid versions are 2 for keystone and 1 for swauth '
                      'and rackspace')),
    cfg.BoolOpt('swift_store_auth_insecure', default=False,
                help=_('If True, swiftclient won\'t check for a valid SSL '
                       'certificate when authenticating.')),
    cfg.StrOpt('swift_store_region',
               help=_('The region of the swift endpoint to be used for '
                      'single tenant. This setting is only necessary if the '
                      'tenant has multiple swift endpoints.')),
    cfg.StrOpt('swift_store_endpoint_type', default='publicURL',
               help=_('A string giving the endpoint type of the swift '
                      'service to use (publicURL, adminURL or internalURL). '
                      'This setting is only used if swift_store_auth_version '
                      'is 2.')),
    cfg.StrOpt('swift_store_service_type', default='object-store',
               help=_('A string giving the service type of the swift service '
                      'to use. This setting is only used if '
                      'swift_store_auth_version is 2.')),
    cfg.StrOpt('swift_store_container',
               default=DEFAULT_CONTAINER,
               help=_('Container within the account that the account should '
                      'use for storing images in Swift.')),
    cfg.IntOpt('swift_store_large_object_size',
               default=DEFAULT_LARGE_OBJECT_SIZE,
               help=_('The size, in MB, that Glance will start chunking image '
                      'files and do a large object manifest in Swift')),
    cfg.IntOpt('swift_store_large_object_chunk_size',
               default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE,
               help=_('The amount of data written to a temporary disk buffer '
                      'during the process of chunking the image file.')),
    cfg.BoolOpt('swift_store_create_container_on_put', default=False,
                help=_('A boolean value that determines if we create the '
                       'container if it does not exist.')),
    cfg.BoolOpt('swift_store_multi_tenant', default=False,
                help=_('If set to True, enables multi-tenant storage '
                       'mode which causes Glance images to be stored in '
                       'tenant specific Swift accounts.')),
    cfg.ListOpt('swift_store_admin_tenants', default=[],
                help=_('A list of tenants that will be granted read/write '
                       'access on all Swift containers created by Glance in '
                       'multi-tenant mode.')),
    cfg.BoolOpt('swift_store_ssl_compression', default=True,
                help=_('If set to False, disables SSL layer compression of '
                       'https swift requests. Setting to False may improve '
                       'performance for images which are already in a '
                       'compressed format, eg qcow2.')),
]
CONF = cfg.CONF
CONF.register_opts(swift_opts)
class StoreLocation(glance.store.location.StoreLocation):
    """
    Class describing a Swift URI. A Swift URI can look like any of
    the following:
    swift://user:pass@authurl.com/container/obj-id
    swift://account:user:pass@authurl.com/container/obj-id
    swift+http://user:pass@authurl.com/container/obj-id
    swift+https://user:pass@authurl.com/container/obj-id
    When using multi-tenant a URI might look like this (a storage URL):
    swift+https://example.com/container/obj-id
    The swift+http:// URIs indicate there is an HTTP authentication URL.
    The default for Swift is an HTTPS authentication URL, so swift:// and
    swift+https:// are the same...
    """
    def process_specs(self):
        # Populate location attributes from the specs dict handed to the
        # constructor by create_location().
        self.scheme = self.specs.get('scheme', 'swift+https')
        self.user = self.specs.get('user')
        self.key = self.specs.get('key')
        self.auth_or_store_url = self.specs.get('auth_or_store_url')
        self.container = self.specs.get('container')
        self.obj = self.specs.get('obj')
    def _get_credstring(self):
        # URL-quoted 'user:key@' fragment; empty when either part is unset.
        if self.user and self.key:
            return '%s:%s@' % (urllib.quote(self.user), urllib.quote(self.key))
        return ''
    def get_uri(self):
        """Reassemble the swift URI string from the parsed components."""
        auth_or_store_url = self.auth_or_store_url
        # The scheme prefix encodes http vs https, so strip it from the URL.
        if auth_or_store_url.startswith('http://'):
            auth_or_store_url = auth_or_store_url[len('http://'):]
        elif auth_or_store_url.startswith('https://'):
            auth_or_store_url = auth_or_store_url[len('https://'):]
        credstring = self._get_credstring()
        auth_or_store_url = auth_or_store_url.strip('/')
        container = self.container.strip('/')
        obj = self.obj.strip('/')
        return '%s://%s%s/%s/%s' % (self.scheme, credstring, auth_or_store_url,
                                    container, obj)
    def parse_uri(self, uri):
        """
        Parse URLs. This method fixes an issue where credentials specified
        in the URL are interpreted differently in Python 2.6.1+ than prior
        versions of Python. It also deals with the peculiarity that new-style
        Swift URIs have where a username can contain a ':', like so:
        swift://account:user:pass@authurl.com/container/obj
        """
        # Make sure that URIs that contain multiple schemes, such as:
        # swift://user:pass@http://authurl.com/v1/container/obj
        # are immediately rejected.
        if uri.count('://') != 1:
            reason = _("URI cannot contain more than one occurrence "
                       "of a scheme. If you have specified a URI like "
                       "swift://user:pass@http://authurl.com/v1/container/obj"
                       ", you need to change it to use the "
                       "swift+http:// scheme, like so: "
                       "swift+http://user:pass@authurl.com/v1/container/obj")
            LOG.debug(_("Invalid store URI: %(reason)s") % locals())
            raise exception.BadStoreUri(message=reason)
        pieces = urlparse.urlparse(uri)
        assert pieces.scheme in ('swift', 'swift+http', 'swift+https')
        self.scheme = pieces.scheme
        netloc = pieces.netloc
        path = pieces.path.lstrip('/')
        if netloc != '':
            # > Python 2.6.1
            if '@' in netloc:
                creds, netloc = netloc.split('@')
            else:
                creds = None
        else:
            # Python 2.6.1 compat
            # see lp659445 and Python issue7904
            if '@' in path:
                creds, path = path.split('@')
            else:
                creds = None
            netloc = path[0:path.find('/')].strip('/')
            path = path[path.find('/'):].strip('/')
        if creds:
            cred_parts = creds.split(':')
            # NOTE(review): three-part 'account:user:pass' credentials are
            # rejected here despite the class docstring mentioning them —
            # confirm whether that form should be supported.
            if len(cred_parts) != 2:
                reason = (_("Badly formed credentials in Swift URI."))
                LOG.debug(reason)
                raise exception.BadStoreUri()
            user, key = cred_parts
            self.user = urllib.unquote(user)
            self.key = urllib.unquote(key)
        else:
            self.user = None
            self.key = None
        path_parts = path.split('/')
        try:
            self.obj = path_parts.pop()
            self.container = path_parts.pop()
            if not netloc.startswith('http'):
                # push hostname back into the remaining to build full authurl
                path_parts.insert(0, netloc)
                self.auth_or_store_url = '/'.join(path_parts)
        except IndexError:
            reason = _("Badly formed Swift URI.")
            LOG.debug(reason)
            raise exception.BadStoreUri()
    @property
    def swift_url(self):
        """
        Creates a fully-qualified auth url that the Swift client library can
        use. The scheme for the auth_url is determined using the scheme
        included in the `location` field.
        HTTPS is assumed, unless 'swift+http' is specified.
        """
        if self.auth_or_store_url.startswith('http'):
            return self.auth_or_store_url
        else:
            if self.scheme in ('swift+https', 'swift'):
                auth_scheme = 'https://'
            else:
                auth_scheme = 'http://'
            return ''.join([auth_scheme, self.auth_or_store_url])
def Store(context=None, loc=None):
    """Factory: choose the multi-tenant store when it is enabled and the
    location carries no explicit user credentials; otherwise single-tenant."""
    if CONF.swift_store_multi_tenant:
        # keep the short-circuit: only inspect loc when multi-tenant is on
        if loc is None or loc.store_location.user is None:
            return MultiTenantStore(context, loc)
    return SingleTenantStore(context, loc)
class BaseStore(glance.store.base.Store):
    """Common Swift backend logic shared by the single- and multi-tenant
    store implementations.

    Subclasses must provide ``get_connection()`` and ``create_location()``;
    everything else (get/add/delete, segmented "large object" uploads)
    lives here.
    """
    # chunk size used when streaming an object body back to the caller
    CHUNKSIZE = 65536
    def get_schemes(self):
        """Return the URI schemes served by this store."""
        return ('swift+https', 'swift', 'swift+http')
    def configure(self):
        """Cache store-wide configuration from CONF on the instance."""
        _obj_size = self._option_get('swift_store_large_object_size')
        self.large_object_size = _obj_size * ONE_MB
        _chunk_size = self._option_get('swift_store_large_object_chunk_size')
        self.large_object_chunk_size = _chunk_size * ONE_MB
        self.admin_tenants = CONF.swift_store_admin_tenants
        self.region = CONF.swift_store_region
        self.service_type = CONF.swift_store_service_type
        self.endpoint_type = CONF.swift_store_endpoint_type
        self.snet = CONF.swift_enable_snet
        self.insecure = CONF.swift_store_auth_insecure
        self.ssl_compression = CONF.swift_store_ssl_compression
    def get(self, location, connection=None):
        """Fetch the object at *location*.

        :returns: (chunked body iterable, length) tuple
        :raises exception.NotFound: if Swift has no object at the location
        """
        location = location.store_location
        if not connection:
            connection = self.get_connection(location)
        try:
            resp_headers, resp_body = connection.get_object(
                container=location.container, obj=location.obj,
                resp_chunk_size=self.CHUNKSIZE)
        except swiftclient.ClientException as e:
            if e.http_status == httplib.NOT_FOUND:
                msg = _("Swift could not find image at URI.")
                raise exception.NotFound(msg)
            else:
                raise
        class ResponseIndexable(glance.store.Indexable):
            def another(self):
                try:
                    return self.wrapped.next()
                except StopIteration:
                    return ''
        length = int(resp_headers.get('content-length', 0))
        return (ResponseIndexable(resp_body, length), length)
    def get_size(self, location, connection=None):
        """Return the object's content-length, or 0 on any failure
        (size is advisory, so errors are deliberately swallowed)."""
        location = location.store_location
        if not connection:
            connection = self.get_connection(location)
        try:
            resp_headers = connection.head_object(
                container=location.container, obj=location.obj)
            return int(resp_headers.get('content-length', 0))
        except Exception:
            return 0
    def _option_get(self, param):
        """Return CONF.<param>, raising BadStoreConfiguration if unset."""
        result = getattr(CONF, param)
        if not result:
            reason = (_("Could not find %(param)s in configuration "
                        "options.") % locals())
            LOG.error(reason)
            raise exception.BadStoreConfiguration(store_name="swift",
                                                  reason=reason)
        return result
    def _delete_stale_chunks(self, connection, container, chunk_list):
        """Best-effort removal of segments left behind by a failed upload."""
        for chunk in chunk_list:
            LOG.debug(_("Deleting chunk %s") % chunk)
            try:
                connection.delete_object(container, chunk)
            except Exception:
                msg = _("Failed to delete orphaned chunk %s/%s")
                LOG.exception(msg, container, chunk)
    def add(self, image_id, image_file, image_size, connection=None):
        """Store *image_file* at a new location derived from *image_id*.

        Images smaller than ``large_object_size`` go up in one PUT; larger
        or unknown-size (``image_size == 0``) images are uploaded as
        numbered segments plus an ``X-Object-Manifest`` object.

        :returns: (uri, size, checksum, metadata dict)
        :raises exception.Duplicate: if the object already exists
        """
        location = self.create_location(image_id)
        if not connection:
            connection = self.get_connection(location)
        self._create_container_if_missing(location.container, connection)
        LOG.debug(_("Adding image object '%(obj_name)s' "
                    "to Swift") % dict(obj_name=location.obj))
        try:
            if image_size > 0 and image_size < self.large_object_size:
                # Image size is known, and is less than large_object_size.
                # Send to Swift with regular PUT.
                obj_etag = connection.put_object(location.container,
                                                 location.obj, image_file,
                                                 content_length=image_size)
            else:
                # Write the image into Swift in chunks.
                chunk_id = 1
                if image_size > 0:
                    total_chunks = str(int(
                        math.ceil(float(image_size) /
                                  float(self.large_object_chunk_size))))
                else:
                    # image_size == 0 is when we don't know the size
                    # of the image. This can occur with older clients
                    # that don't inspect the payload size.
                    LOG.debug(_("Cannot determine image size. Adding as a "
                                "segmented object to Swift."))
                    total_chunks = '?'
                checksum = hashlib.md5()
                written_chunks = []
                combined_chunks_size = 0
                while True:
                    chunk_size = self.large_object_chunk_size
                    if image_size == 0:
                        content_length = None
                    else:
                        left = image_size - combined_chunks_size
                        if left == 0:
                            break
                        if chunk_size > left:
                            chunk_size = left
                        content_length = chunk_size
                    chunk_name = "%s-%05d" % (location.obj, chunk_id)
                    reader = ChunkReader(image_file, checksum, chunk_size)
                    try:
                        chunk_etag = connection.put_object(
                            location.container, chunk_name, reader,
                            content_length=content_length)
                        written_chunks.append(chunk_name)
                    except Exception:
                        # Delete orphaned segments from swift backend
                        LOG.exception(_("Error during chunked upload to "
                                        "backend, deleting stale chunks"))
                        self._delete_stale_chunks(connection,
                                                  location.container,
                                                  written_chunks)
                        raise
                    bytes_read = reader.bytes_read
                    msg = _("Wrote chunk %(chunk_name)s (%(chunk_id)d/"
                            "%(total_chunks)s) of length %(bytes_read)d "
                            "to Swift returning MD5 of content: "
                            "%(chunk_etag)s")
                    LOG.debug(msg % locals())
                    if bytes_read == 0:
                        # Delete the last chunk, because it's of zero size.
                        # This will happen if size == 0.
                        LOG.debug(_("Deleting final zero-length chunk"))
                        connection.delete_object(location.container,
                                                 chunk_name)
                        break
                    chunk_id += 1
                    combined_chunks_size += bytes_read
                # In the case we have been given an unknown image size,
                # set the size to the total size of the combined chunks.
                if image_size == 0:
                    image_size = combined_chunks_size
                # Now we write the object manifest and return the
                # manifest's etag...
                manifest = "%s/%s-" % (location.container, location.obj)
                headers = {'ETag': hashlib.md5("").hexdigest(),
                           'X-Object-Manifest': manifest}
                # The ETag returned for the manifest is actually the
                # MD5 hash of the concatenated checksums of the strings
                # of each chunk...so we ignore this result in favour of
                # the MD5 of the entire image file contents, so that
                # users can verify the image file contents accordingly
                connection.put_object(location.container, location.obj,
                                      None, headers=headers)
                obj_etag = checksum.hexdigest()
            # NOTE: We return the user and key here! Have to because
            # location is used by the API server to return the actual
            # image data. We *really* should consider NOT returning
            # the location attribute from GET /images/<ID> and
            # GET /images/details
            return (location.get_uri(), image_size, obj_etag, {})
        except swiftclient.ClientException as e:
            if e.http_status == httplib.CONFLICT:
                raise exception.Duplicate(_("Swift already has an image at "
                                            "this location"))
            msg = (_("Failed to add object to Swift.\n"
                     "Got error from Swift: %(e)s") % locals())
            LOG.error(msg)
            raise glance.store.BackendException(msg)
    def delete(self, location, connection=None):
        """Delete the object at *location* and, for segmented uploads,
        every segment named by its manifest.

        :raises exception.NotFound: if Swift has no object at the location
        """
        location = location.store_location
        if not connection:
            connection = self.get_connection(location)
        try:
            # We request the manifest for the object. If one exists,
            # that means the object was uploaded in chunks/segments,
            # and we need to delete all the chunks as well as the
            # manifest.
            manifest = None
            try:
                headers = connection.head_object(
                    location.container, location.obj)
                manifest = headers.get('x-object-manifest')
            except swiftclient.ClientException as e:
                if e.http_status != httplib.NOT_FOUND:
                    raise
            if manifest:
                # Delete all the chunks before the object manifest itself
                obj_container, obj_prefix = manifest.split('/', 1)
                segments = connection.get_container(
                    obj_container, prefix=obj_prefix)[1]
                for segment in segments:
                    # TODO(jaypipes): This would be an easy area to parallelize
                    # since we're simply sending off parallelizable requests
                    # to Swift to delete stuff. It's not like we're going to
                    # be hogging up network or file I/O here...
                    connection.delete_object(obj_container,
                                             segment['name'])
            # Delete object (or, in segmented case, the manifest)
            connection.delete_object(location.container, location.obj)
        except swiftclient.ClientException as e:
            if e.http_status == httplib.NOT_FOUND:
                msg = _("Swift could not find image at URI.")
                raise exception.NotFound(msg)
            else:
                raise
    def _create_container_if_missing(self, container, connection):
        """
        Creates a missing container in Swift if the
        ``swift_store_create_container_on_put`` option is set.
        :param container: Name of container to create
        :param connection: Connection to swift service
        """
        try:
            connection.head_container(container)
        except swiftclient.ClientException as e:
            if e.http_status == httplib.NOT_FOUND:
                if CONF.swift_store_create_container_on_put:
                    try:
                        connection.put_container(container)
                    except swiftclient.ClientException as e:
                        msg = _("Failed to add container to Swift.\n"
                                "Got error from Swift: %(e)s") % locals()
                        raise glance.store.BackendException(msg)
                else:
                    msg = (_("The container %(container)s does not exist in "
                             "Swift. Please set the "
                             "swift_store_create_container_on_put option"
                             "to add container to Swift automatically.") %
                           locals())
                    raise glance.store.BackendException(msg)
            else:
                raise
    def get_connection(self, location):
        """Subclass hook: return a swiftclient Connection for *location*.

        Bug fix: the stub previously did ``raise NotImplemented()`` —
        ``NotImplemented`` is a value, not an exception type, so calling it
        raised ``TypeError``.  It also took no arguments, unlike the
        signature every subclass implements.
        """
        raise NotImplementedError()
    def create_location(self, image_id):
        """Subclass hook: return a StoreLocation for a new image.

        Same ``NotImplemented()`` / missing-argument fix as get_connection.
        """
        raise NotImplementedError()
class SingleTenantStore(BaseStore):
    # Store that authenticates with one fixed set of credentials from CONF;
    # the credentials are embedded in each image's location URI.
    EXAMPLE_URL = "swift://<USER>:<KEY>@<AUTH_ADDRESS>/<CONTAINER>/<FILE>"
    def configure(self):
        """Extend BaseStore.configure with the auth version option."""
        super(SingleTenantStore, self).configure()
        self.auth_version = self._option_get('swift_store_auth_version')
    def configure_add(self):
        """Read the options needed for uploads: auth address, container,
        user and key.  The URI scheme mirrors the auth address's scheme."""
        self.auth_address = self._option_get('swift_store_auth_address')
        if self.auth_address.startswith('http://'):
            self.scheme = 'swift+http'
        else:
            self.scheme = 'swift+https'
        self.container = CONF.swift_store_container
        self.user = self._option_get('swift_store_user')
        self.key = self._option_get('swift_store_key')
    def create_location(self, image_id):
        """Build the StoreLocation (including credentials) for a new image."""
        specs = {'scheme': self.scheme,
                 'container': self.container,
                 'obj': str(image_id),
                 'auth_or_store_url': self.auth_address,
                 'user': self.user,
                 'key': self.key}
        return StoreLocation(specs)
    def get_connection(self, location):
        """Open a swiftclient Connection using the credentials embedded
        in *location*.

        :raises exception.BadStoreUri: if user credentials are missing or,
            for auth version 2, not of the 'tenant:user' form
        """
        if not location.user:
            reason = (_("Location is missing user:password information."))
            LOG.debug(reason)
            raise exception.BadStoreUri(message=reason)
        auth_url = location.swift_url
        if not auth_url.endswith('/'):
            auth_url += '/'
        if self.auth_version == '2':
            # keystone auth encodes 'tenant:user' in the user field
            try:
                tenant_name, user = location.user.split(':')
            except ValueError:
                reason = (_("Badly formed tenant:user '%(user)s' in "
                            "Swift URI") % {'user': location.user})
                LOG.debug(reason)
                raise exception.BadStoreUri()
        else:
            tenant_name = None
            user = location.user
        os_options = {}
        if self.region:
            os_options['region_name'] = self.region
        os_options['endpoint_type'] = self.endpoint_type
        os_options['service_type'] = self.service_type
        return swiftclient.Connection(
            auth_url, user, location.key, insecure=self.insecure,
            tenant_name=tenant_name, snet=self.snet,
            auth_version=self.auth_version, os_options=os_options,
            ssl_compression=self.ssl_compression)
class MultiTenantStore(BaseStore):
    # Store that uses the request context's own token/tenant; each image
    # gets its own container and the location URI is a bare storage URL.
    EXAMPLE_URL = "swift://<SWIFT_URL>/<CONTAINER>/<FILE>"
    def configure_add(self):
        """Resolve the tenant's object-store endpoint from the context's
        service catalog.

        :raises exception.BadStoreConfiguration: when no context or no
            service catalog is available
        """
        self.container = CONF.swift_store_container
        if self.context is None:
            reason = _("Multi-tenant Swift storage requires a context.")
            raise exception.BadStoreConfiguration(store_name="swift",
                                                  reason=reason)
        if self.context.service_catalog is None:
            reason = _("Multi-tenant Swift storage requires "
                       "a service catalog.")
            raise exception.BadStoreConfiguration(store_name="swift",
                                                  reason=reason)
        self.storage_url = auth.get_endpoint(
            self.context.service_catalog, service_type=self.service_type,
            endpoint_region=self.region, endpoint_type=self.endpoint_type)
        if self.storage_url.startswith('http://'):
            self.scheme = 'swift+http'
        else:
            self.scheme = 'swift+https'
    def delete(self, location, connection=None):
        """Delete the image object, then its per-image container."""
        if not connection:
            connection = self.get_connection(location.store_location)
        super(MultiTenantStore, self).delete(location, connection)
        connection.delete_container(location.store_location.container)
    def set_acls(self, location, public=False, read_tenants=None,
                 write_tenants=None, connection=None):
        """Set container read/write ACLs: public read, per-tenant read, and
        per-tenant write (always extended with the configured admin tenants).

        :raises exception.NotFound: if the container does not exist
        """
        location = location.store_location
        if not connection:
            connection = self.get_connection(location)
        if read_tenants is None:
            read_tenants = []
        if write_tenants is None:
            write_tenants = []
        headers = {}
        if public:
            headers['X-Container-Read'] = ".r:*,.rlistings"
        elif read_tenants:
            headers['X-Container-Read'] = ','.join('%s:*' % i
                                                   for i in read_tenants)
        else:
            headers['X-Container-Read'] = ''
        write_tenants.extend(self.admin_tenants)
        if write_tenants:
            headers['X-Container-Write'] = ','.join('%s:*' % i
                                                    for i in write_tenants)
        else:
            headers['X-Container-Write'] = ''
        try:
            connection.post_container(location.container, headers=headers)
        except swiftclient.ClientException as e:
            if e.http_status == httplib.NOT_FOUND:
                msg = _("Swift could not find image at URI.")
                raise exception.NotFound(msg)
            else:
                raise
    def create_location(self, image_id):
        """Build the StoreLocation for a new image; note the dedicated
        per-image container name '<container>_<image_id>'."""
        specs = {'scheme': self.scheme,
                 'container': self.container + '_' + str(image_id),
                 'obj': str(image_id),
                 'auth_or_store_url': self.storage_url}
        return StoreLocation(specs)
    def get_connection(self, location):
        """Open a pre-authenticated swiftclient Connection using the
        context's token against the tenant's own storage URL."""
        return swiftclient.Connection(
            None, self.context.user, None,
            preauthurl=location.swift_url,
            preauthtoken=self.context.auth_tok,
            tenant_name=self.context.tenant,
            auth_version='2', snet=self.snet, insecure=self.insecure,
            ssl_compression=self.ssl_compression)
class ChunkReader(object):
    """File-like wrapper that serves at most *total* bytes from *fd*,
    feeding every byte it reads into *checksum* (a hashlib object)."""
    def __init__(self, fd, checksum, total):
        self.fd = fd
        self.checksum = checksum
        self.total = total
        self.bytes_read = 0
    def read(self, i):
        # Clamp the request so we never exceed our byte budget.
        remaining = self.total - self.bytes_read
        data = self.fd.read(min(i, remaining))
        self.bytes_read += len(data)
        self.checksum.update(data)
        return data
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains Google Kubernetes Engine operators.
"""
import os
import subprocess
import tempfile
from typing import Dict, Optional, Union
from google.cloud.container_v1.types import Cluster
from airflow import AirflowException
from airflow.models import BaseOperator
from airflow.providers.cncf.kubernetes.operators.kubernetes_pod import KubernetesPodOperator
from airflow.providers.google.cloud.hooks.base import CloudBaseHook
from airflow.providers.google.cloud.hooks.kubernetes_engine import GKEHook
from airflow.utils.decorators import apply_defaults
class GKEDeleteClusterOperator(BaseOperator):
    """
    Deletes the cluster, including the Kubernetes endpoint and all worker nodes.
    To delete a certain cluster, you must specify the ``project_id``, the ``name``
    of the cluster, the ``location`` that the cluster is in, and the ``task_id``.
    **Operator Creation**: ::
        operator = GKEDeleteClusterOperator(
            task_id='cluster_delete',
            project_id='my-project',
            location='cluster-location',
            name='cluster-name')
    .. seealso::
        For more detail about deleting clusters have a look at the reference:
        https://google-cloud-python.readthedocs.io/en/latest/container/gapic/v1/api.html#google.cloud.container_v1.ClusterManagerClient.delete_cluster
    :param project_id: The Google Developers Console [project ID or project number]
    :type project_id: str
    :param name: The name of the resource to delete, in this case cluster name
    :type name: str
    :param location: The name of the Google Compute Engine zone in which the cluster
        resides.
    :type location: str
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
    :type gcp_conn_id: str
    :param api_version: The api version to use
    :type api_version: str
    """
    template_fields = ['project_id', 'gcp_conn_id', 'name', 'location', 'api_version']
    @apply_defaults
    def __init__(self,
                 name: str,
                 location: str,
                 project_id: Optional[str] = None,
                 gcp_conn_id: str = 'google_cloud_default',
                 api_version: str = 'v2',
                 *args,
                 **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.location = location
        self.api_version = api_version
        self.name = name
        self._check_input()
    def _check_input(self):
        # Fail fast at construction time rather than at execution time.
        if not all([self.project_id, self.name, self.location]):
            self.log.error(
                'One of (project_id, name, location) is missing or incorrect')
            raise AirflowException('Operator has incorrect or missing input.')
    def execute(self, context):
        # NOTE(review): self.api_version is templated and stored but not
        # forwarded to GKEHook here — confirm whether that is intended.
        hook = GKEHook(gcp_conn_id=self.gcp_conn_id, location=self.location)
        delete_result = hook.delete_cluster(name=self.name, project_id=self.project_id)
        return delete_result
class GKECreateClusterOperator(BaseOperator):
    """
    Create a Google Kubernetes Engine Cluster of specified dimensions.
    The operator will wait until the cluster is created.

    The **minimum** required to define a cluster to create is:

    ``dict()`` ::
        cluster_def = {'name': 'my-cluster-name',
                       'initial_node_count': 1}

    or

    ``Cluster`` proto ::
        from google.cloud.container_v1.types import Cluster

        cluster_def = Cluster(name='my-cluster-name', initial_node_count=1)

    **Operator Creation**: ::

        operator = GKEClusterCreateOperator(
                    task_id='cluster_create',
                    project_id='my-project',
                    location='my-location'
                    body=cluster_def)

    .. seealso::
        For more detail on about creating clusters have a look at the reference:
        :class:`google.cloud.container_v1.types.Cluster`

    :param project_id: The Google Developers Console [project ID or project number]
    :type project_id: str
    :param location: The name of the Google Compute Engine zone in which the cluster
        resides.
    :type location: str
    :param body: The Cluster definition to create, can be protobuf or python dict, if
        dict it must match protobuf message Cluster
    :type body: dict or google.cloud.container_v1.types.Cluster
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
    :type gcp_conn_id: str
    :param api_version: The api version to use
    :type api_version: str
    """
    template_fields = ['project_id', 'gcp_conn_id', 'location', 'api_version', 'body']

    @apply_defaults
    def __init__(self,
                 location: str,
                 body: Optional[Union[Dict, Cluster]],
                 project_id: Optional[str] = None,
                 gcp_conn_id: str = 'google_cloud_default',
                 api_version: str = 'v2',
                 *args,
                 **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # Store configuration first, then validate it in one place.
        self.location = location
        self.body = body
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.api_version = api_version
        self._check_input()

    def _check_input(self):
        """Validate that required fields are set and the body defines a minimal cluster."""
        fields_present = all([self.project_id, self.location, self.body])
        if isinstance(self.body, dict):
            # Dict bodies must carry the two mandatory cluster keys.
            body_valid = "name" in self.body and "initial_node_count" in self.body
        else:
            # Proto (or other object) bodies must expose truthy name/initial_node_count.
            body_valid = bool(getattr(self.body, "name", None)) and \
                bool(getattr(self.body, "initial_node_count", None))
        if not (fields_present and body_valid):
            self.log.error(
                "One of (project_id, location, body, body['name'], "
                "body['initial_node_count']) is missing or incorrect"
            )
            raise AirflowException("Operator has incorrect or missing input.")

    def execute(self, context):
        """Create the cluster and return the create operation's result."""
        gke_hook = GKEHook(gcp_conn_id=self.gcp_conn_id, location=self.location)
        return gke_hook.create_cluster(cluster=self.body, project_id=self.project_id)
# Environment variable that gcloud/kubectl consult for the kubeconfig file path.
KUBE_CONFIG_ENV_VAR = "KUBECONFIG"
class GKEStartPodOperator(KubernetesPodOperator):
    """
    Executes a task in a Kubernetes pod in the specified Google Kubernetes
    Engine cluster

    This Operator assumes that the system has gcloud installed and has configured a
    connection id with a service account.

    The **minimum** required to define a cluster to create are the variables
    ``task_id``, ``project_id``, ``location``, ``cluster_name``, ``name``,
    ``namespace``, and ``image``

    **Operator Creation**: ::

        operator = GKEPodOperator(task_id='pod_op',
                                  project_id='my-project',
                                  location='us-central1-a',
                                  cluster_name='my-cluster-name',
                                  name='task-name',
                                  namespace='default',
                                  image='perl')

    .. seealso::
        For more detail about application authentication have a look at the reference:
        https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application

    :param location: The name of the Google Kubernetes Engine zone in which the
        cluster resides, e.g. 'us-central1-a'
    :type location: str
    :param cluster_name: The name of the Google Kubernetes Engine cluster the pod
        should be spawned in
    :type cluster_name: str
    :param project_id: The Google Developers Console project id
    :type project_id: str
    :param gcp_conn_id: The google cloud connection id to use. This allows for
        users to specify a service account.
    :type gcp_conn_id: str
    """
    # GKE-specific fields are templated in addition to everything the base
    # KubernetesPodOperator already templates.
    template_fields = ('project_id', 'location',
                       'cluster_name') + KubernetesPodOperator.template_fields

    @apply_defaults
    def __init__(self,
                 location: str,
                 cluster_name: str,
                 project_id: Optional[str] = None,
                 gcp_conn_id: str = 'google_cloud_default',
                 *args,
                 **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.project_id = project_id
        self.location = location
        self.cluster_name = cluster_name
        self.gcp_conn_id = gcp_conn_id

        # A None connection id used to mean "use ADC"; that behavior was removed,
        # so reject it explicitly with migration guidance.
        if self.gcp_conn_id is None:
            raise AirflowException(
                "The gcp_conn_id parameter has become required. If you want to use Application Default "
                "Credentials (ADC) strategy for authorization, create an empty connection "
                "called `google_cloud_default`.",
            )

    def execute(self, context):
        """Fetch cluster credentials via gcloud, then run the pod task.

        Resolves the project id (explicit parameter wins over the connection's
        project), writes a fresh kubeconfig for the target cluster into a
        temporary file, and delegates pod execution to the parent class.

        NOTE(review): os.environ[KUBECONFIG] is left set after this method
        returns, pointing at a temp file that is deleted on context exit.
        """
        hook = CloudBaseHook(gcp_conn_id=self.gcp_conn_id)
        # Fall back to the project configured on the connection.
        self.project_id = self.project_id or hook.project_id

        if not self.project_id:
            raise AirflowException("The project id must be passed either as "
                                   "keyword project_id parameter or as project_id extra "
                                   "in GCP connection definition. Both are not set!")

        # Write config to a temp file and set the environment variable to point to it.
        # This is to avoid race conditions of reading/writing a single file
        with tempfile.NamedTemporaryFile() as conf_file:
            os.environ[KUBE_CONFIG_ENV_VAR] = conf_file.name
            # Run gcloud with the service-account key file exported for the
            # duration of the call.
            with hook.provide_gcp_credential_file_as_context():
                # Attempt to get/update credentials
                # We call gcloud directly instead of using google-cloud-python api
                # because there is no way to write kubernetes config to a file, which is
                # required by KubernetesPodOperator.
                # The gcloud command looks at the env variable `KUBECONFIG` for where to save
                # the kubernetes config file.
                subprocess.check_call(
                    ["gcloud", "container", "clusters", "get-credentials",
                     self.cluster_name,
                     "--zone", self.location,
                     "--project", self.project_id])

            # Tell `KubernetesPodOperator` where the config file is located
            self.config_file = os.environ[KUBE_CONFIG_ENV_VAR]
            # The temp file must still exist while the pod runs, so stay
            # inside the NamedTemporaryFile context for the parent execute.
            return super().execute(context)
| |
from gusto import *
from firedrake import (FunctionSpace, as_vector,
VectorFunctionSpace,
PeriodicIntervalMesh,
ExtrudedMesh,
SpatialCoordinate, exp,
pi, cos, Function,
conditional, Mesh, sin, op2)
from firedrake.petsc import PETSc
from argparse import ArgumentParser
import sys
def minimum(f):
    """Return the global (MPI-reduced) minimum of ``|f|`` over all DoFs of *f*.

    Despite the name, the kernel compares against ``fabs(b[0])``, so this is
    the minimum *absolute* value of the field.

    NOTE(review): the accumulator starts at 1000, so the result is only
    correct if min|f| < 1000 -- fine for Exner pressure (O(1)); confirm for
    other uses.
    """
    fmin = op2.Global(1, [1000], dtype=float)
    op2.par_loop(op2.Kernel("""
void minify(double *a, double *b) {
    a[0] = a[0] > fabs(b[0]) ? fabs(b[0]) : a[0];
}
""", "minify"), f.dof_dset.set, fmin(op2.MIN), f.dat(op2.READ))
    return fmin.data[0]
# Start PETSc event logging so solver stages can be profiled.
PETSc.Log.begin()

# add_help=False because we provide our own --help flag below (argparse's
# built-in help would not print via PETSc on rank 0 only).
parser = ArgumentParser(description="""Flow over an isolated mountain (non-hydrostatic).""",
                        add_help=False)

parser.add_argument("--test",
                    action="store_true",
                    help="Enable a quick test run.")

parser.add_argument("--dt",
                    action="store",
                    default=5.0,
                    type=float,
                    help="Time step size (s)")

parser.add_argument("--res",
                    default=1,
                    type=int,
                    action="store",
                    help="Resolution scaling parameter.")

parser.add_argument("--debug",
                    action="store_true",
                    help="Turn on KSP monitors")

parser.add_argument("--help",
                    action="store_true",
                    help="Show help.")

args, _ = parser.parse_known_args()

if args.help:
    # Renamed from `help` to avoid shadowing the builtin.
    help_message = parser.format_help()
    PETSc.Sys.Print("%s\n" % help_message)
    # NOTE(review): exits with status 1 after printing help; status 0 is
    # conventional -- confirm whether callers rely on this.
    sys.exit(1)

res = args.res
nlayers = res*140  # number of vertical layers in the extruded mesh
columns = res*360  # number of horizontal columns
dt = args.dt       # time step size (s)

# A test run does a single step; otherwise integrate to 9000 s.
if args.test:
    tmax = dt
else:
    tmax = 9000.

H = 35000.  # Height position of the model top (m)
L = 144000.  # horizontal domain length (m)
dx = L / columns
cfl = 10.0 * dt / dx  # advective CFL based on the 10 m/s background wind
dz = H / nlayers

PETSc.Sys.Print("""
Problem parameters:\n
Test case: Non-hydrostatic gravity wave over an isolated mountain.\n
Time-step size: %s,\n
Dx (m): %s,\n
Dz (m): %s,\n
CFL: %s,\n
Test run: %s.\n
""" % (dt, dx, dz, cfl,
       bool(args.test)))

PETSc.Sys.Print("Initializing problem with dt: %s and tmax: %s.\n" % (dt,
                                                                      tmax))
PETSc.Sys.Print("Creating mesh with %s columns and %s layers...\n" % (columns,
                                                                      nlayers))
# Build a periodic 1D base mesh and extrude it vertically into columns.
m = PeriodicIntervalMesh(columns, L)
ext_mesh = ExtrudedMesh(m, layers=nlayers, layer_height=H/nlayers)

# DG2 vector space used to hold the terrain-following coordinate field.
Vc = VectorFunctionSpace(ext_mesh, "DG", 2)
# (Removed dead code: an unused `coord`/`x` interpolation that was
# immediately shadowed by the SpatialCoordinate unpacking below.)

# Witch-of-Agnesi mountain profile: half-width a, height hm, centred at xc.
a = 1000.
xc = L/2.
x, z = SpatialCoordinate(ext_mesh)
hm = 1.
zs = hm*a**2/((x-xc)**2 + a**2)

smooth_z = True
if smooth_z:
    # Terrain-following coordinates decay smoothly to flat levels above zh.
    zh = 5000.
    xexpr = as_vector([x, conditional(z < zh, z + cos(0.5*pi*z/zh)**6*zs, z)])
else:
    # Classic linear decay of the terrain displacement up to the model top.
    xexpr = as_vector([x, z + ((H-z)/H)*zs])

new_coords = Function(Vc).interpolate(xexpr)
mesh = Mesh(new_coords)

# sponge function: Rayleigh damping layer in the top 10 km to absorb
# upward-propagating waves before they reflect off the rigid lid.
W_DG = FunctionSpace(mesh, "DG", 2)
x, z = SpatialCoordinate(mesh)
zc = H-10000.
mubar = 0.15/dt
mu_top = conditional(z <= zc, 0.0, mubar*sin((pi/2.)*(z-zc)/(H-zc))**2)
mu = Function(W_DG).interpolate(mu_top)
# Prognostic fields carried by the compressible Euler model.
fieldlist = ['u', 'rho', 'theta']
timestepping = TimesteppingParameters(dt=dt)

# Encode resolution and time step in the output directory name for bookkeeping.
dirname = "hybrid_nh_mountain_smootherz_dx%s_dt%s" % (dx, dt)
# Dump diagnostics roughly every `dumptime` seconds of simulated time.
dumptime = 1000
dumpfreq = int(dumptime / dt)
output = OutputParameters(dirname=dirname,
                          dumpfreq=dumpfreq,
                          dumplist=['u'],
                          perturbation_fields=['theta', 'rho'],
                          log_level='INFO')
cparameters = CompressibleParameters(g=9.80665, cp=1004.)
diagnostics = Diagnostics(*fieldlist)
diagnostic_fields = [CourantNumber(),
                     VelocityZ()]

# Lowest-order compatible finite element state (degree-1 in both directions)
# with the sponge layer attached.
state = State(mesh,
              vertical_degree=1,
              horizontal_degree=1,
              family="CG",
              sponge_function=mu,
              timestepping=timestepping,
              output=output,
              parameters=cparameters,
              diagnostics=diagnostics,
              fieldlist=fieldlist,
              diagnostic_fields=diagnostic_fields)
# Initial conditions
u0 = state.fields("u")
rho0 = state.fields("rho")
theta0 = state.fields("theta")

# Spaces
Vu = u0.function_space()
Vt = theta0.function_space()
Vr = rho0.function_space()

# Thermodynamic constants required for setting initial conditions
# and reference profiles
g = cparameters.g
N = cparameters.N
p_0 = cparameters.p_0
c_p = cparameters.cp
R_d = cparameters.R_d
kappa = cparameters.kappa

# Isothermal-like background: constant buoyancy frequency N gives an
# exponential potential temperature profile.
# N^2 = (g/theta)dtheta/dz => dtheta/dz = theta N^2g => theta=theta_0exp(N^2gz)
Tsurf = 300.
thetab = Tsurf*exp(N**2*z/g)
theta_b = Function(Vt).interpolate(thetab)

# Calculate hydrostatic Pi (Exner pressure).
# FIX: corrected "varaibles" -> "variables" in the two log messages below.
PETSc.Sys.Print("Computing hydrostatic variables...\n")

# Use vertical hybridization preconditioner for the balance initialization
piparams = {'ksp_type': 'gmres',
            'ksp_monitor_true_residual': None,
            'pc_type': 'python',
            'mat_type': 'matfree',
            'pc_python_type': 'gusto.VerticalHybridizationPC',
            # Vertical trace system is only coupled vertically in columns
            # block ILU is a direct solver!
            'vert_hybridization': {'ksp_type': 'preonly',
                                   'pc_type': 'bjacobi',
                                   'sub_pc_type': 'ilu'}}

Pi = Function(Vr)
rho_b = Function(Vr)

# Two balance solves with different top-boundary Exner values are used to
# linearly extrapolate the pi_boundary that gives min(Pi) = 1 at the surface
# (assumes the second solve's default pi_boundary is 1.0 -- confirm in gusto).
compressible_hydrostatic_balance(state,
                                 theta_b,
                                 rho_b,
                                 Pi, top=True,
                                 pi_boundary=0.5,
                                 params=piparams)
p0 = minimum(Pi)

compressible_hydrostatic_balance(state,
                                 theta_b,
                                 rho_b,
                                 Pi,
                                 top=True,
                                 params=piparams)
p1 = minimum(Pi)

# Linear fit min(Pi) = alpha*pi_boundary + beta through the two samples.
alpha = 2.*(p1-p0)
beta = p1-alpha
pi_top = (1.-beta)/alpha

# Final solve with the calibrated top boundary, also solving for density.
compressible_hydrostatic_balance(state,
                                 theta_b,
                                 rho_b,
                                 Pi,
                                 top=True,
                                 pi_boundary=pi_top,
                                 solve_for_rho=True,
                                 params=piparams)

theta0.assign(theta_b)
rho0.assign(rho_b)
# Uniform 10 m/s horizontal background wind; remove any spurious vertical
# component introduced by the projection onto the terrain-following mesh.
u0.project(as_vector([10.0, 0.0]))
remove_initial_w(u0, state.Vv)

PETSc.Sys.Print("Finished computing hydrostatic variables...\n")

state.initialise([('u', u0),
                  ('rho', rho0),
                  ('theta', theta0)])
state.set_reference_profiles([('rho', rho_b),
                              ('theta', theta_b)])
# Set up advection schemes: vector-invariant (Euler-Poincare) form for
# velocity, continuity form for density, advective form for theta.
ueqn = EulerPoincare(state, Vu)
rhoeqn = AdvectionEquation(state, Vr, equation_form="continuity")
supg = True
if supg:
    # Streamline-upwind stabilization for the partially-continuous theta space.
    thetaeqn = SUPGAdvection(state, Vt, equation_form="advective")
else:
    thetaeqn = EmbeddedDGAdvection(state, Vt, equation_form="advective",
                                   options=EmbeddedDGOptions())
advected_fields = []
advected_fields.append(("u", ThetaMethod(state, u0, ueqn)))
advected_fields.append(("rho", SSPRK3(state, rho0, rhoeqn)))
advected_fields.append(("theta", SSPRK3(state, theta0, thetaeqn)))

# Set up linear solver: static condensation (SCPC) eliminates fields 0 and 1
# (u, rho), leaving a condensed system solved with GAMG-preconditioned FGMRES.
solver_parameters = {'mat_type': 'matfree',
                     'ksp_type': 'preonly',
                     'pc_type': 'python',
                     'pc_python_type': 'firedrake.SCPC',
                     'pc_sc_eliminate_fields': '0, 1',
                     # The reduced operator is not symmetric
                     'condensed_field': {'ksp_type': 'fgmres',
                                         'ksp_rtol': 1.0e-8,
                                         'ksp_atol': 1.0e-8,
                                         'ksp_max_it': 100,
                                         'pc_type': 'gamg',
                                         'pc_gamg_sym_graph': None,
                                         'mg_levels': {'ksp_type': 'gmres',
                                                       'ksp_max_it': 5,
                                                       'pc_type': 'bjacobi',
                                                       'sub_pc_type': 'ilu'}}}

if args.debug:
    # Print residual history of the condensed solve when --debug is given.
    solver_parameters['condensed_field']['ksp_monitor_true_residual'] = None

linear_solver = CompressibleSolver(state,
                                   solver_parameters=solver_parameters,
                                   overwrite_solver_parameters=True)

# Set up forcing (pressure gradient, gravity, sponge damping).
compressible_forcing = CompressibleForcing(state)

# Build time stepper: semi-implicit Crank-Nicolson with the schemes above.
stepper = CrankNicolson(state,
                        advected_fields,
                        linear_solver,
                        compressible_forcing)

PETSc.Sys.Print("Starting simulation...\n")

stepper.run(t=0, tmax=tmax)
| |
#!/usr/bin/env python
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
import roslib; roslib.load_manifest('vigir_behavior_surprise_plug')
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, Logger
from flexbe_states.input_state import InputState
from vigir_behavior_walk_to_template.walk_to_template_sm import WalktoTemplateSM
from flexbe_states.operator_decision_state import OperatorDecisionState
from vigir_behavior_grasp_object.grasp_object_sm import GraspObjectSM
from vigir_flexbe_states.get_template_affordance_state import GetTemplateAffordanceState
from vigir_flexbe_states.plan_affordance_state import PlanAffordanceState
from vigir_flexbe_states.execute_trajectory_msg_state import ExecuteTrajectoryMsgState
from vigir_flexbe_states.change_control_mode_action_state import ChangeControlModeActionState
from vigir_flexbe_states.get_template_pregrasp_state import GetTemplatePregraspState
from flexbe_states.calculation_state import CalculationState
from vigir_flexbe_states.plan_endeffector_pose_state import PlanEndeffectorPoseState
from vigir_flexbe_states.get_template_grasp_state import GetTemplateGraspState
from vigir_flexbe_states.plan_endeffector_cartesian_waypoints_state import PlanEndeffectorCartesianWaypointsState
from vigir_flexbe_states.finger_configuration_state import FingerConfigurationState
from vigir_flexbe_states.detach_object_state import DetachObjectState
from vigir_flexbe_states.attach_object_state import AttachObjectState
from flexbe_states.log_state import LogState
from vigir_flexbe_states.moveit_predefined_pose_state import MoveitPredefinedPoseState
from vigir_flexbe_states.footstep_plan_relative_state import FootstepPlanRelativeState
from vigir_flexbe_states.execute_step_plan_action_state import ExecuteStepPlanActionState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Mon Jun 01 2015
@author: Philipp Schillinger, Dorothea Koert
'''
class SurprisePlugSM(Behavior):
    '''
    do the plug surprise task
    '''

    def __init__(self):
        super(SurprisePlugSM, self).__init__()
        self.name = 'Surprise Plug'

        # parameters of this behavior
        self.add_parameter('hand_side', 'left')
        self.add_parameter('hand_type', 'vt_hand')

        # references to used behaviors
        self.add_behavior(WalktoTemplateSM, 'Walk_To_Template')
        self.add_behavior(GraspObjectSM, 'Grasp Object')

        # Additional initialization code can be added inside the following tags
        # [MANUAL_INIT]
        # [/MANUAL_INIT]

    # Behavior comments:

    def create(self):
        """Build and return the top-level 'Surprise Plug' state machine."""
        plug_in_affordance = "plug_in"  # NOTE(review): currently unused by the generated states
        plug_out_affordance = "plug_out"
        arm_controller = ExecuteTrajectoryMsgState.CONTROLLER_LEFT_ARM if self.hand_side == 'left' else ExecuteTrajectoryMsgState.CONTROLLER_RIGHT_ARM
        # BUGFIX: `affordance_controller` was referenced by Execute_Plug_Out below
        # but never defined, raising NameError when the state machine is built.
        # Affordance trajectories execute on the same arm trajectory controller.
        affordance_controller = arm_controller
        # x:202 y:568, x:374 y:401
        _state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
        _state_machine.userdata.none = None
        _state_machine.userdata.hand_side = self.hand_side
        _state_machine.userdata.grasp_preference = 0
        _state_machine.userdata.step_back_distance = 1.0  # meters

        # Additional creation code can be added inside the following tags
        # [MANUAL_CREATE]
        # [/MANUAL_CREATE]

        # Sub-state machine: retreat from the grasp pose back to the pregrasp pose.
        # x:588 y:126, x:324 y:44
        _sm_back_to_pregrasp_0 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['template_id', 'hand_side', 'grasp_preference'])

        with _sm_back_to_pregrasp_0:
            # x:30 y:40
            OperatableStateMachine.add('Get_Pregrasp',
                                       GetTemplatePregraspState(),
                                       transitions={'done': 'Extract_Frame_Id', 'failed': 'failed', 'not_available': 'failed'},
                                       autonomy={'done': Autonomy.Low, 'failed': Autonomy.Full, 'not_available': Autonomy.Full},
                                       remapping={'template_id': 'template_id', 'hand_side': 'hand_side', 'preference': 'grasp_preference', 'pre_grasp': 'grasp_pose'})

            # x:242 y:292
            OperatableStateMachine.add('Plan_To_Pregrasp',
                                       PlanEndeffectorCartesianWaypointsState(ignore_collisions=True, include_torso=False, keep_endeffector_orientation=False, allow_incomplete_plans=True, vel_scaling=0.1, planner_id="RRTConnectkConfigDefault"),
                                       transitions={'planned': 'Move_To_Pregrasp_Pose', 'incomplete': 'Move_To_Pregrasp_Pose', 'failed': 'Get_Pregrasp'},
                                       autonomy={'planned': Autonomy.Low, 'incomplete': Autonomy.High, 'failed': Autonomy.Full},
                                       remapping={'waypoints': 'grasp_waypoints', 'hand': 'hand_side', 'frame_id': 'grasp_frame_id', 'joint_trajectory': 'joint_trajectory', 'plan_fraction': 'plan_fraction'})

            # x:568 y:295
            OperatableStateMachine.add('Move_To_Pregrasp_Pose',
                                       ExecuteTrajectoryMsgState(controller=arm_controller),
                                       transitions={'done': 'finished', 'failed': 'Get_Pregrasp'},
                                       autonomy={'done': Autonomy.Low, 'failed': Autonomy.Full},
                                       remapping={'joint_trajectory': 'joint_trajectory'})

            # x:41 y:178
            OperatableStateMachine.add('Extract_Frame_Id',
                                       CalculationState(calculation=lambda pose: pose.header.frame_id),
                                       transitions={'done': 'Convert_Waypoints'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'input_value': 'grasp_pose', 'output_value': 'grasp_frame_id'})

            # x:40 y:293
            OperatableStateMachine.add('Convert_Waypoints',
                                       CalculationState(calculation=lambda msg: [msg.pose]),
                                       transitions={'done': 'Plan_To_Pregrasp'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'input_value': 'grasp_pose', 'output_value': 'grasp_waypoints'})

        # Sub-state machine: move the hand to the grasp pose (with retries and
        # optional operator template adjustment).
        # x:598 y:30, x:220 y:148, x:1035 y:174
        _sm_go_to_grasp_1 = OperatableStateMachine(outcomes=['finished', 'failed', 'again'], input_keys=['hand_side', 'grasp_preference', 'template_id'], output_keys=['grasp_preference'])

        with _sm_go_to_grasp_1:
            # x:33 y:49
            OperatableStateMachine.add('Get_Grasp_Info',
                                       GetTemplateGraspState(),
                                       transitions={'done': 'Extract_Frame_Id', 'failed': 'failed', 'not_available': 'failed'},
                                       autonomy={'done': Autonomy.Low, 'failed': Autonomy.Full, 'not_available': Autonomy.Full},
                                       remapping={'template_id': 'template_id', 'hand_side': 'hand_side', 'preference': 'grasp_preference', 'grasp': 'grasp_pose'})

            # x:40 y:293
            OperatableStateMachine.add('Convert_Waypoints',
                                       CalculationState(calculation=lambda msg: [msg.pose]),
                                       transitions={'done': 'Plan_To_Grasp'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'input_value': 'grasp_pose', 'output_value': 'grasp_waypoints'})

            # x:242 y:292
            OperatableStateMachine.add('Plan_To_Grasp',
                                       PlanEndeffectorCartesianWaypointsState(ignore_collisions=True, include_torso=False, keep_endeffector_orientation=False, allow_incomplete_plans=True, vel_scaling=0.1, planner_id="RRTConnectkConfigDefault"),
                                       transitions={'planned': 'Move_To_Grasp_Pose', 'incomplete': 'Move_To_Grasp_Pose', 'failed': 'Decide_Which_Grasp'},
                                       autonomy={'planned': Autonomy.High, 'incomplete': Autonomy.High, 'failed': Autonomy.Full},
                                       remapping={'waypoints': 'grasp_waypoints', 'hand': 'hand_side', 'frame_id': 'grasp_frame_id', 'joint_trajectory': 'joint_trajectory', 'plan_fraction': 'plan_fraction'})

            # x:494 y:175
            OperatableStateMachine.add('Move_To_Grasp_Pose',
                                       ExecuteTrajectoryMsgState(controller=arm_controller),
                                       transitions={'done': 'Optional_Template_Adjustment', 'failed': 'Decide_Which_Grasp'},
                                       autonomy={'done': Autonomy.Low, 'failed': Autonomy.Full},
                                       remapping={'joint_trajectory': 'joint_trajectory'})

            # x:970 y:294
            OperatableStateMachine.add('Increase_Preference_Index',
                                       CalculationState(calculation=lambda x: x + 1),
                                       transitions={'done': 'again'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'input_value': 'grasp_preference', 'output_value': 'grasp_preference'})

            # x:41 y:178
            OperatableStateMachine.add('Extract_Frame_Id',
                                       CalculationState(calculation=lambda pose: pose.header.frame_id),
                                       transitions={'done': 'Convert_Waypoints'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'input_value': 'grasp_pose', 'output_value': 'grasp_frame_id'})

            # x:727 y:50
            OperatableStateMachine.add('Optional_Template_Adjustment',
                                       OperatorDecisionState(outcomes=["grasp", "pregrasp", "skip"], hint="Consider adjusting the template's pose", suggestion="skip"),
                                       transitions={'grasp': 'Get_Grasp_Info', 'pregrasp': 'again', 'skip': 'finished'},
                                       autonomy={'grasp': Autonomy.Full, 'pregrasp': Autonomy.Full, 'skip': Autonomy.High})

            # x:754 y:294
            OperatableStateMachine.add('Decide_Which_Grasp',
                                       OperatorDecisionState(outcomes=["same", "next"], hint='Try the same grasp or the next one?', suggestion='same'),
                                       transitions={'same': 'Optional_Template_Adjustment', 'next': 'Increase_Preference_Index'},
                                       autonomy={'same': Autonomy.High, 'next': Autonomy.High})

        # Sub-state machine: move the hand to the pregrasp pose.
        # x:596 y:113, x:351 y:62
        _sm_go_to_pregrasp_2 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['hand_side', 'grasp_preference', 'template_id'], output_keys=['grasp_preference', 'pregrasp_pose'])

        with _sm_go_to_pregrasp_2:
            # x:27 y:68
            OperatableStateMachine.add('Get_Pregrasp_Info',
                                       GetTemplatePregraspState(),
                                       transitions={'done': 'Plan_To_Pregrasp_Pose', 'failed': 'failed', 'not_available': 'failed'},
                                       autonomy={'done': Autonomy.Low, 'failed': Autonomy.Full, 'not_available': Autonomy.Full},
                                       remapping={'template_id': 'template_id', 'hand_side': 'hand_side', 'preference': 'grasp_preference', 'pre_grasp': 'pregrasp_pose'})

            # x:537 y:228
            OperatableStateMachine.add('Move_To_Pregrasp_Pose',
                                       ExecuteTrajectoryMsgState(controller=arm_controller),
                                       transitions={'done': 'finished', 'failed': 'Decide_Which_Pregrasp'},
                                       autonomy={'done': Autonomy.Low, 'failed': Autonomy.Full},
                                       remapping={'joint_trajectory': 'joint_trajectory'})

            # x:25 y:328
            OperatableStateMachine.add('Increase_Preference_Index',
                                       CalculationState(calculation=lambda x: x + 1),
                                       transitions={'done': 'Get_Pregrasp_Info'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'input_value': 'grasp_preference', 'output_value': 'grasp_preference'})

            # x:266 y:228
            OperatableStateMachine.add('Plan_To_Pregrasp_Pose',
                                       PlanEndeffectorPoseState(ignore_collisions=False, include_torso=False, allowed_collisions=[], planner_id="RRTConnectkConfigDefault"),
                                       transitions={'planned': 'Move_To_Pregrasp_Pose', 'failed': 'Decide_Which_Pregrasp'},
                                       autonomy={'planned': Autonomy.Low, 'failed': Autonomy.Full},
                                       remapping={'target_pose': 'pregrasp_pose', 'hand': 'hand_side', 'joint_trajectory': 'joint_trajectory'})

            # x:266 y:327
            OperatableStateMachine.add('Decide_Which_Pregrasp',
                                       OperatorDecisionState(outcomes=["same", "next"], hint='Try the same pregrasp or the next one?', suggestion='same'),
                                       transitions={'same': 'Get_Pregrasp_Info', 'next': 'Increase_Preference_Index'},
                                       autonomy={'same': Autonomy.High, 'next': Autonomy.Full})

        # Sub-state machine: plan and execute a backwards footstep sequence.
        # x:30 y:365, x:130 y:365
        _sm_perform_step_back_3 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['step_back_distance'])

        with _sm_perform_step_back_3:
            # x:78 y:78
            OperatableStateMachine.add('Plan_Steps_Back',
                                       FootstepPlanRelativeState(direction=FootstepPlanRelativeState.DIRECTION_BACKWARD),
                                       transitions={'planned': 'Do_Steps_Back', 'failed': 'failed'},
                                       autonomy={'planned': Autonomy.High, 'failed': Autonomy.Full},
                                       remapping={'distance': 'step_back_distance', 'plan_header': 'plan_header'})

            # x:74 y:228
            OperatableStateMachine.add('Do_Steps_Back',
                                       ExecuteStepPlanActionState(),
                                       transitions={'finished': 'finished', 'failed': 'failed'},
                                       autonomy={'finished': Autonomy.Low, 'failed': Autonomy.Full},
                                       remapping={'plan_header': 'plan_header'})

        # Sub-state machine: insert the cable into the target, release it and
        # retreat the hand.
        # x:550 y:574, x:130 y:365
        _sm_plug_in_cable_4 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['target_template_id', 'hand_side', 'grasp_preference', 'template_id', 'template_pose'])

        with _sm_plug_in_cable_4:
            # x:82 y:59
            OperatableStateMachine.add('Go_to_Pregrasp',
                                       _sm_go_to_pregrasp_2,
                                       transitions={'finished': 'Go_to_Grasp', 'failed': 'failed'},
                                       autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
                                       remapping={'hand_side': 'hand_side', 'grasp_preference': 'grasp_preference', 'template_id': 'target_template_id', 'pregrasp_pose': 'pregrasp_pose'})

            # x:322 y:171
            OperatableStateMachine.add('Go_to_Grasp',
                                       _sm_go_to_grasp_1,
                                       transitions={'finished': 'Detach_Cable', 'failed': 'failed', 'again': 'Go_to_Pregrasp'},
                                       autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit, 'again': Autonomy.Inherit},
                                       remapping={'hand_side': 'hand_side', 'grasp_preference': 'grasp_preference', 'template_id': 'target_template_id'})

            # x:516 y:306
            OperatableStateMachine.add('Open_Fingers',
                                       FingerConfigurationState(hand_type=self.hand_type, configuration=0),
                                       transitions={'done': 'Back_To_Pregrasp', 'failed': 'failed'},
                                       autonomy={'done': Autonomy.Low, 'failed': Autonomy.Full},
                                       remapping={'hand_side': 'hand_side'})

            # x:472 y:406
            OperatableStateMachine.add('Back_To_Pregrasp',
                                       _sm_back_to_pregrasp_0,
                                       transitions={'finished': 'Close_Fingers', 'failed': 'failed'},
                                       autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
                                       remapping={'template_id': 'template_id', 'hand_side': 'hand_side', 'grasp_preference': 'grasp_preference'})

            # x:314 y:524
            OperatableStateMachine.add('Close_Fingers',
                                       FingerConfigurationState(hand_type=self.hand_type, configuration=1),
                                       transitions={'done': 'finished', 'failed': 'failed'},
                                       autonomy={'done': Autonomy.Low, 'failed': Autonomy.Full},
                                       remapping={'hand_side': 'hand_side'})

            # x:527 y:204
            OperatableStateMachine.add('Detach_Cable',
                                       DetachObjectState(),
                                       transitions={'done': 'Open_Fingers', 'failed': 'failed'},
                                       autonomy={'done': Autonomy.Low, 'failed': Autonomy.Full},
                                       remapping={'template_id': 'template_id', 'template_pose': 'template_pose'})

        # Sub-state machine: pull the cable out via the plug_out affordance.
        # x:30 y:365, x:115 y:197
        _sm_take_cable_5 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['template_id', 'hand_side', 'none'])

        with _sm_take_cable_5:
            # x:81 y:54
            OperatableStateMachine.add('Get_Plug_Out_Affordance',
                                       GetTemplateAffordanceState(identifier=plug_out_affordance),
                                       transitions={'done': 'Plan_Plug_Out', 'failed': 'failed', 'not_available': 'failed'},
                                       autonomy={'done': Autonomy.Low, 'failed': Autonomy.Full, 'not_available': Autonomy.Full},
                                       remapping={'template_id': 'template_id', 'hand_side': 'hand_side', 'affordance': 'affordance'})

            # x:348 y:73
            OperatableStateMachine.add('Plan_Plug_Out',
                                       PlanAffordanceState(vel_scaling=0.1, planner_id="RRTConnectkConfigDefault"),
                                       transitions={'done': 'Execute_Plug_Out', 'incomplete': 'Execute_Plug_Out', 'failed': 'failed'},
                                       autonomy={'done': Autonomy.High, 'incomplete': Autonomy.High, 'failed': Autonomy.Full},
                                       remapping={'affordance': 'affordance', 'hand': 'hand_side', 'reference_point': 'none', 'joint_trajectory': 'joint_trajectory', 'plan_fraction': 'plan_fraction'})

            # x:322 y:255
            OperatableStateMachine.add('Execute_Plug_Out',
                                       ExecuteTrajectoryMsgState(controller=affordance_controller),
                                       transitions={'done': 'finished', 'failed': 'failed'},
                                       autonomy={'done': Autonomy.Low, 'failed': Autonomy.Full},
                                       remapping={'joint_trajectory': 'joint_trajectory'})

        with _state_machine:
            # x:73 y:78
            OperatableStateMachine.add('Request_Cable_Template',
                                       InputState(request=InputState.SELECTED_OBJECT_ID, message="Place cable template"),
                                       transitions={'received': 'Decide_Walking', 'aborted': 'failed', 'no_connection': 'failed', 'data_error': 'failed'},
                                       autonomy={'received': Autonomy.Low, 'aborted': Autonomy.Full, 'no_connection': Autonomy.Full, 'data_error': Autonomy.Full},
                                       remapping={'data': 'template_id'})

            # x:330 y:172
            OperatableStateMachine.add('Walk_To_Template',
                                       self.use_behavior(WalktoTemplateSM, 'Walk_To_Template'),
                                       transitions={'finished': 'Set_Manipulate', 'failed': 'failed', 'aborted': 'Set_Manipulate'},
                                       autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit, 'aborted': Autonomy.Inherit},
                                       remapping={'grasp_preference': 'grasp_preference', 'hand_side': 'hand_side', 'template_id': 'template_id'})

            # x:337 y:78
            OperatableStateMachine.add('Decide_Walking',
                                       OperatorDecisionState(outcomes=["walk", "stand"], hint="Walk to template?", suggestion="walk"),
                                       transitions={'walk': 'Walk_To_Template', 'stand': 'Set_Manipulate'},
                                       autonomy={'walk': Autonomy.High, 'stand': Autonomy.Full})

            # x:839 y:72
            OperatableStateMachine.add('Grasp Object',
                                       self.use_behavior(GraspObjectSM, 'Grasp Object'),
                                       transitions={'finished': 'Attach_Cable', 'failed': 'failed'},
                                       autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
                                       remapping={'hand_side': 'hand_side', 'template_id': 'template_id'})

            # x:843 y:302
            OperatableStateMachine.add('Take_Cable',
                                       _sm_take_cable_5,
                                       transitions={'finished': 'Request_Target_Template', 'failed': 'failed'},
                                       autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
                                       remapping={'template_id': 'template_id', 'hand_side': 'hand_side', 'none': 'none'})

            # x:566 y:78
            OperatableStateMachine.add('Set_Manipulate',
                                       ChangeControlModeActionState(target_mode=ChangeControlModeActionState.MANIPULATE),
                                       transitions={'changed': 'Grasp Object', 'failed': 'failed'},
                                       autonomy={'changed': Autonomy.Low, 'failed': Autonomy.Full})

            # x:822 y:545
            OperatableStateMachine.add('Plug_In_Cable',
                                       _sm_plug_in_cable_4,
                                       transitions={'finished': 'Warn_Stand', 'failed': 'failed'},
                                       autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
                                       remapping={'target_template_id': 'target_template_id', 'hand_side': 'hand_side', 'grasp_preference': 'grasp_preference', 'template_id': 'template_id', 'template_pose': 'template_pose'})

            # x:826 y:185
            OperatableStateMachine.add('Attach_Cable',
                                       AttachObjectState(),
                                       transitions={'done': 'Take_Cable', 'failed': 'failed'},
                                       autonomy={'done': Autonomy.Low, 'failed': Autonomy.Full},
                                       remapping={'template_id': 'template_id', 'hand_side': 'hand_side', 'template_pose': 'template_pose'})

            # x:811 y:400
            OperatableStateMachine.add('Request_Target_Template',
                                       InputState(request=InputState.SELECTED_OBJECT_ID, message="Place target template"),
                                       transitions={'received': 'Plug_In_Cable', 'aborted': 'failed', 'no_connection': 'failed', 'data_error': 'failed'},
                                       autonomy={'received': Autonomy.Low, 'aborted': Autonomy.High, 'no_connection': Autonomy.Full, 'data_error': Autonomy.Full},
                                       remapping={'data': 'target_template_id'})

            # x:753 y:646
            OperatableStateMachine.add('Warn_Stand',
                                       LogState(text="Going to stand pose", severity=Logger.REPORT_INFO),
                                       transitions={'done': 'Go_To_Stand_Pose'},
                                       autonomy={'done': Autonomy.High})

            # x:581 y:573
            OperatableStateMachine.add('Go_To_Stand_Pose',
                                       MoveitPredefinedPoseState(target_pose=MoveitPredefinedPoseState.STAND_POSE, vel_scaling=0.1, ignore_collisions=False, link_paddings={}, is_cartesian=False),
                                       transitions={'done': 'Set_Stand', 'failed': 'failed'},
                                       autonomy={'done': Autonomy.Low, 'failed': Autonomy.Full},
                                       remapping={'side': 'none'})

            # x:446 y:653
            OperatableStateMachine.add('Decide_Step_Back',
                                       OperatorDecisionState(outcomes=["walk", "stand"], hint="Step back?", suggestion="walk"),
                                       transitions={'walk': 'Perform_Step_Back', 'stand': 'finished'},
                                       autonomy={'walk': Autonomy.High, 'stand': Autonomy.Full})

            # x:100 y:661
            OperatableStateMachine.add('Perform_Step_Back',
                                       _sm_perform_step_back_3,
                                       transitions={'finished': 'finished', 'failed': 'failed'},
                                       autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
                                       remapping={'step_back_distance': 'step_back_distance'})

            # x:383 y:527
            OperatableStateMachine.add('Set_Stand',
                                       ChangeControlModeActionState(target_mode=ChangeControlModeActionState.STAND),
                                       transitions={'changed': 'Decide_Step_Back', 'failed': 'failed'},
                                       autonomy={'changed': Autonomy.Low, 'failed': Autonomy.Full})

        return _state_machine

    # Private functions can be added inside the following tags
    # [MANUAL_FUNC]
    # [/MANUAL_FUNC]
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TFGAN's estimator.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.contrib import layers
from tensorflow.contrib.gan.python import namedtuples
from tensorflow.contrib.gan.python.estimator.python import gan_estimator_impl as estimator
from tensorflow.contrib.gan.python.losses.python import tuple_losses as losses
from tensorflow.contrib.learn.python.learn.learn_io import graph_io
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import monitored_session
from tensorflow.python.training import training
from tensorflow.python.training import training_util
def generator_fn(noise_dict, mode):
  """Trivial generator: a single fully connected layer over the noise.

  Args:
    noise_dict: Dict holding the noise tensor under key 'x'.
    mode: Estimator mode key; unused, but required by the GANEstimator API.

  Returns:
    A tensor with the same second-dimension width as the input noise.
  """
  del mode  # The generator behaves identically in every mode.
  inputs = noise_dict['x']
  output_width = inputs.shape[1].value
  return layers.fully_connected(inputs, output_width)
def discriminator_fn(data, unused_conditioning, mode):
  """Trivial discriminator: one fully connected layer producing one logit.

  Args:
    data: Batch of real or generated data.
    unused_conditioning: Conditioning input; ignored (unconditional GAN).
    mode: Estimator mode key; unused.

  Returns:
    A single-logit tensor per example.
  """
  del unused_conditioning, mode  # Unconditional and mode-independent.
  num_logits = 1
  return layers.fully_connected(data, num_logits)
def mock_head(testcase, expected_generator_inputs, expected_real_data,
              generator_scope_name):
  """Returns a mock head that validates logits values and variable names.

  The mock mimics `head_lib._Head`: its `create_estimator_spec` receives the
  `GANModel` produced by `_gan_model_fn` (passed through the `logits`
  argument), asserts its contents and the set of trainable variables created,
  then returns a minimal `EstimatorSpec` appropriate for the mode.

  Args:
    testcase: The `test.TestCase` instance used to make assertions.
    expected_generator_inputs: The inputs expected on `gan_model`.
    expected_real_data: The real-data tensor expected in TRAIN/EVAL modes.
    generator_scope_name: The variable scope the generator is built in.

  Returns:
    A `NonCallableMagicMock` with spec `head_lib._Head` whose
    `create_estimator_spec` wraps the validating implementation.
  """
  discriminator_scope_name = 'Discriminator'  # comes from TFGAN defaults
  # Variable names produced by the single `fully_connected` layer built by
  # generator_fn / discriminator_fn (weights + biases each).
  generator_var_names = set([
      '%s/fully_connected/weights:0' % generator_scope_name,
      '%s/fully_connected/biases:0' % generator_scope_name])
  discriminator_var_names = set([
      '%s/fully_connected/weights:0' % discriminator_scope_name,
      '%s/fully_connected/biases:0' % discriminator_scope_name])

  def _create_estimator_spec(features, mode, logits, labels):
    gan_model = logits  # renaming for clarity
    is_predict = mode == model_fn_lib.ModeKeys.PREDICT
    # `_gan_model_fn` routes everything through `logits`, so the standard
    # head arguments must arrive empty.
    testcase.assertIsNone(features)
    testcase.assertIsNone(labels)
    testcase.assertIsInstance(gan_model, namedtuples.GANModel)
    trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
    # In PREDICT mode only the generator is built, so only its variables
    # may exist.
    expected_var_names = (generator_var_names if is_predict else
                          generator_var_names | discriminator_var_names)
    testcase.assertItemsEqual(expected_var_names,
                              [var.name for var in trainable_vars])
    assertions = []

    def _or_none(x):
      # Discriminator-side fields of the GANModel are None in PREDICT mode.
      return None if is_predict else x
    testcase.assertEqual(expected_generator_inputs, gan_model.generator_inputs)
    # TODO(joelshor): Add check on `generated_data`.
    testcase.assertItemsEqual(
        generator_var_names,
        set([x.name for x in gan_model.generator_variables]))
    testcase.assertEqual(generator_scope_name, gan_model.generator_scope.name)
    testcase.assertEqual(_or_none(expected_real_data), gan_model.real_data)
    # TODO(joelshor): Add check on `discriminator_real_outputs`.
    # TODO(joelshor): Add check on `discriminator_gen_outputs`.
    if is_predict:
      testcase.assertIsNone(gan_model.discriminator_scope)
    else:
      testcase.assertEqual(discriminator_scope_name,
                           gan_model.discriminator_scope.name)
    # Return a minimal spec per mode; the loss/train_op are dummies because
    # only the assertions above matter to the calling test.
    with ops.control_dependencies(assertions):
      if mode == model_fn_lib.ModeKeys.TRAIN:
        return model_fn_lib.EstimatorSpec(
            mode=mode, loss=array_ops.zeros([]),
            train_op=control_flow_ops.no_op(), training_hooks=[])
      elif mode == model_fn_lib.ModeKeys.EVAL:
        return model_fn_lib.EstimatorSpec(
            mode=mode, predictions=gan_model.generated_data,
            loss=array_ops.zeros([]))
      elif mode == model_fn_lib.ModeKeys.PREDICT:
        return model_fn_lib.EstimatorSpec(
            mode=mode, predictions=gan_model.generated_data)
      else:
        testcase.fail('Invalid mode: {}'.format(mode))

  head = test.mock.NonCallableMagicMock(spec=head_lib._Head)
  head.create_estimator_spec = test.mock.MagicMock(
      wraps=_create_estimator_spec)
  return head
class GANModelFnTest(test.TestCase):
  """Tests that _gan_model_fn passes expected logits to mock head."""

  def setUp(self):
    # Fresh model directory per test so checkpoints don't leak across tests.
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      # Flush cached summary writers before deleting the directory they
      # write into.
      writer_cache.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def _test_logits_helper(self, mode):
    """Tests that the expected logits are passed to mock head."""
    with ops.Graph().as_default():
      training_util.get_or_create_global_step()
      generator_inputs = {'x': array_ops.zeros([5, 4])}
      # PREDICT mode carries no labels, hence no real data.
      real_data = (None if mode == model_fn_lib.ModeKeys.PREDICT else
                   array_ops.zeros([5, 4]))
      generator_scope_name = 'generator'
      head = mock_head(self,
                       expected_generator_inputs=generator_inputs,
                       expected_real_data=real_data,
                       generator_scope_name=generator_scope_name)
      estimator_spec = estimator._gan_model_fn(
          features=generator_inputs,
          labels=real_data,
          mode=mode,
          generator_fn=generator_fn,
          discriminator_fn=discriminator_fn,
          generator_scope_name=generator_scope_name,
          head=head)
      # Run the mode-appropriate op; the real assertions happen inside the
      # mock head when `_gan_model_fn` invokes it above.
      with monitored_session.MonitoredTrainingSession(
          checkpoint_dir=self._model_dir) as sess:
        if mode == model_fn_lib.ModeKeys.TRAIN:
          sess.run(estimator_spec.train_op)
        elif mode == model_fn_lib.ModeKeys.EVAL:
          sess.run(estimator_spec.loss)
        elif mode == model_fn_lib.ModeKeys.PREDICT:
          sess.run(estimator_spec.predictions)
        else:
          self.fail('Invalid mode: {}'.format(mode))

  def test_logits_predict(self):
    self._test_logits_helper(model_fn_lib.ModeKeys.PREDICT)

  def test_logits_eval(self):
    self._test_logits_helper(model_fn_lib.ModeKeys.EVAL)

  def test_logits_train(self):
    self._test_logits_helper(model_fn_lib.ModeKeys.TRAIN)
# TODO(joelshor): Add pandas test.
class GANEstimatorIntegrationTest(test.TestCase):
  """End-to-end train/evaluate/predict tests for `GANEstimator`."""

  def setUp(self):
    # Fresh model directory per test so checkpoints don't leak across tests.
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      # Flush cached summary writers before deleting the directory they
      # write into.
      writer_cache.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def _test_complete_flow(
      self, train_input_fn, eval_input_fn, predict_input_fn, prediction_size,
      lr_decay=False):
    """Runs train -> evaluate -> predict and checks the outputs.

    Args:
      train_input_fn: Input function passed to `est.train`.
      eval_input_fn: Input function passed to `est.evaluate`.
      predict_input_fn: Input function passed to `est.predict`.
      prediction_size: Expected shape of the stacked predictions.
      lr_decay: If True, pass optimizer *callables* with a decaying learning
        rate instead of fixed-rate optimizer instances.
    """
    def make_opt():
      # Optimizer with a learning rate that decays with the global step.
      gstep = training_util.get_or_create_global_step()
      lr = learning_rate_decay.exponential_decay(1.0, gstep, 10, 0.9)
      return training.GradientDescentOptimizer(lr)

    def get_metrics(gan_model):
      # Custom eval metric comparing real and generated data.
      return {
          'mse_custom_metric': metrics_lib.mean_squared_error(
              gan_model.real_data, gan_model.generated_data)
      }
    gopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
    dopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
    est = estimator.GANEstimator(
        generator_fn=generator_fn,
        discriminator_fn=discriminator_fn,
        generator_loss_fn=losses.wasserstein_generator_loss,
        discriminator_loss_fn=losses.wasserstein_discriminator_loss,
        generator_optimizer=gopt,
        discriminator_optimizer=dopt,
        get_eval_metric_ops_fn=get_metrics,
        model_dir=self._model_dir)

    # TRAIN
    num_steps = 10
    est.train(train_input_fn, steps=num_steps)

    # EVALUATE
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
    self.assertIn('loss', six.iterkeys(scores))
    # The reported loss is the sum of the two component losses.
    self.assertEqual(scores['discriminator_loss'] + scores['generator_loss'],
                     scores['loss'])
    self.assertIn('mse_custom_metric', six.iterkeys(scores))

    # PREDICT
    predictions = np.array([x for x in est.predict(predict_input_fn)])
    self.assertAllEqual(prediction_size, predictions.shape)

  def test_numpy_input_fn(self):
    """Tests complete flow with numpy_input_fn."""
    input_dim = 4
    batch_size = 5
    data = np.zeros([batch_size, input_dim])
    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        batch_size=batch_size,
        shuffle=False)
    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        prediction_size=[batch_size, input_dim])

  def test_numpy_input_fn_lrdecay(self):
    """Tests complete flow with numpy_input_fn and learning-rate decay."""
    input_dim = 4
    batch_size = 5
    data = np.zeros([batch_size, input_dim])
    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        batch_size=batch_size,
        shuffle=False)
    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        prediction_size=[batch_size, input_dim],
        lr_decay=True)

  def test_input_fn_from_parse_example(self):
    """Tests complete flow with input_fn constructed from parse_example."""
    input_dim = 4
    batch_size = 6
    data = np.zeros([batch_size, input_dim])
    # Serialize each row as a tf.Example with identical 'x' and 'y' features.
    serialized_examples = []
    for datum in data:
      example = example_pb2.Example(features=feature_pb2.Features(
          feature={
              'x': feature_pb2.Feature(
                  float_list=feature_pb2.FloatList(value=datum)),
              'y': feature_pb2.Feature(
                  float_list=feature_pb2.FloatList(value=datum)),
          }))
      serialized_examples.append(example.SerializeToString())
    feature_spec = {
        'x': parsing_ops.FixedLenFeature([input_dim], dtypes.float32),
        'y': parsing_ops.FixedLenFeature([input_dim], dtypes.float32),
    }

    def _train_input_fn():
      feature_map = parsing_ops.parse_example(
          serialized_examples, feature_spec)
      _, features = graph_io.queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels

    def _eval_input_fn():
      # Single epoch so evaluation terminates.
      feature_map = parsing_ops.parse_example(
          input_lib.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      _, features = graph_io.queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels

    def _predict_input_fn():
      feature_map = parsing_ops.parse_example(
          input_lib.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      _, features = graph_io.queue_parsed_features(feature_map)
      # Prediction takes no labels; drop 'y' entirely.
      features.pop('y')
      return features, None

    self._test_complete_flow(
        train_input_fn=_train_input_fn,
        eval_input_fn=_eval_input_fn,
        predict_input_fn=_predict_input_fn,
        prediction_size=[batch_size, input_dim])
if __name__ == '__main__':
  # Run all test cases in this module under the TensorFlow test runner.
  test.main()
| |
#!/usr/bin/env python
import time
import wx
import images
#-------------------------------------------------------------------
class MyFrame(wx.Frame):
    """Demo frame exercising wx.MenuBar/wx.Menu features: plain items,
    submenus, radio and check items, bitmaps, custom item colours/fonts,
    keyboard shortcuts, and dynamic insertion/removal of items and menus.

    Menu item IDs are hard-coded (1xx..6xx) and the event handlers below are
    bound to those exact IDs, so the construction order and IDs matter.
    """

    def __init__(self, parent, id, log):
        wx.Frame.__init__(self, parent, id, 'Playing with menus', size=(500, 250))
        self.log = log
        self.CenterOnScreen()
        self.CreateStatusBar()
        self.SetStatusText("This is the statusbar")
        tc = wx.TextCtrl(self, -1, """
A bunch of bogus menus have been created for this frame. You
can play around with them to see how they behave and then
check the source for this sample to see how to implement them.
""", style=wx.TE_READONLY|wx.TE_MULTILINE)
        # Prepare the menu bar
        menuBar = wx.MenuBar()
        # 1st menu from left
        menu1 = wx.Menu()
        menu1.Append(101, "&Mercury", "This the text in the Statusbar")
        menu1.Append(102, "&Venus", "")
        menu1.Append(103, "&Earth", "You may select Earth too")
        menu1.AppendSeparator()
        menu1.Append(104, "&Close", "Close this frame")
        # Add menu to the menu bar
        menuBar.Append(menu1, "&Planets")
        # 2nd menu from left
        menu2 = wx.Menu()
        menu2.Append(201, "Hydrogen")
        menu2.Append(202, "Helium")
        # a submenu in the 2nd menu
        submenu = wx.Menu()
        submenu.Append(2031,"Lanthanium")
        submenu.Append(2032,"Cerium")
        submenu.Append(2033,"Praseodymium")
        menu2.Append(203, "Lanthanides", submenu)
        # Append 2nd menu
        menuBar.Append(menu2, "&Elements")
        menu3 = wx.Menu()
        # Radio items: checking one unchecks the rest of its contiguous group.
        menu3.Append(301, "IDLE", "a Python shell using tcl/tk as GUI", wx.ITEM_RADIO)
        menu3.Append(302, "PyCrust", "a Python shell using wxPython as GUI", wx.ITEM_RADIO)
        menu3.Append(303, "psi", "a simple Python shell using wxPython as GUI", wx.ITEM_RADIO)
        menu3.AppendSeparator()
        menu3.Append(304, "project1", "", wx.ITEM_NORMAL)
        menu3.Append(305, "project2", "", wx.ITEM_NORMAL)
        menuBar.Append(menu3, "&Shells")
        menu4 = wx.Menu()
        # Check menu items
        menu4.Append(401, "letters", "abcde...", wx.ITEM_CHECK)
        menu4.Append(402, "digits", "123...", wx.ITEM_CHECK)
        menu4.Append(403, "letters and digits", "abcd... + 123...", wx.ITEM_CHECK)
        menuBar.Append(menu4, "Chec&k")
        menu5 = wx.Menu()
        # Show how to put an icon in the menu item
        item = wx.MenuItem(menu5, 500, "&Smile!\tCtrl+S", "This one has an icon")
        item.SetBitmap(images.Smiles.GetBitmap())
        menu5.Append(item)
        menuitemwithbmp = wx.MenuItem(menu5, wx.ID_ANY, "Submenu with Bitmap")
        # Show how to change the background colour of the menu item
        menuitemwithbmp.SetBackgroundColour(wx.YELLOW)
        # Show how to change the menu item's text colour
        menuitemwithbmp.SetTextColour(wx.BLUE)
        # Show how to change the menu item's font
        menuitemwithbmp.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False, ''))
        # NOTE: rebinds the `submenu` name used above for the Lanthanides menu.
        submenu = wx.Menu(style=wx.MENU_TEAROFF)
        submenu.Append(wx.MenuItem(menu5, wx.ID_ANY, "Woot!"))
        menuitemwithbmp.SetBitmap(images.book.GetBitmap())
        menuitemwithbmp.SetSubMenu(submenu)
        menu5.Append(menuitemwithbmp)
        # Shortcuts
        menu5.Append(501, "Interesting thing\tCtrl+A", "Note the shortcut!")
        menu5.AppendSeparator()
        menu5.Append(502, "Hello\tShift+H")
        menu5.AppendSeparator()
        menu5.Append(503, "remove the submenu")
        menu6 = wx.Menu()
        menu6.Append(601, "Submenu Item")
        menu5.Append(504, "submenu", menu6)
        menu5.Append(505, "remove this menu")
        menu5.Append(506, "this is updated")
        menu5.Append(507, "insert after this...")
        menu5.Append(508, "...and before this")
        menuBar.Append(menu5, "&Fun")
        self.SetMenuBar(menuBar)
        # Menu events
        self.Bind(wx.EVT_MENU_HIGHLIGHT_ALL, self.OnMenuHighlight)
        self.Bind(wx.EVT_MENU, self.Menu101, id=101)
        self.Bind(wx.EVT_MENU, self.Menu102, id=102)
        self.Bind(wx.EVT_MENU, self.Menu103, id=103)
        self.Bind(wx.EVT_MENU, self.CloseWindow, id=104)
        self.Bind(wx.EVT_MENU, self.Menu201, id=201)
        self.Bind(wx.EVT_MENU, self.Menu202, id=202)
        self.Bind(wx.EVT_MENU, self.Menu2031, id=2031)
        self.Bind(wx.EVT_MENU, self.Menu2032, id=2032)
        self.Bind(wx.EVT_MENU, self.Menu2033, id=2033)
        # All three radio items share one handler; it dispatches on event id.
        self.Bind(wx.EVT_MENU, self.Menu301To303, id=301)
        self.Bind(wx.EVT_MENU, self.Menu301To303, id=302)
        self.Bind(wx.EVT_MENU, self.Menu301To303, id=303)
        self.Bind(wx.EVT_MENU, self.Menu304, id=304)
        self.Bind(wx.EVT_MENU, self.Menu305, id=305)
        # Range of menu items
        self.Bind(wx.EVT_MENU_RANGE, self.Menu401To403, id=401, id2=403)
        self.Bind(wx.EVT_MENU, self.Menu500, id=500)
        self.Bind(wx.EVT_MENU, self.Menu501, id=501)
        self.Bind(wx.EVT_MENU, self.Menu502, id=502)
        self.Bind(wx.EVT_MENU, self.TestRemove, id=503)
        self.Bind(wx.EVT_MENU, self.TestRemove2, id=505)
        self.Bind(wx.EVT_MENU, self.TestInsert, id=507)
        self.Bind(wx.EVT_MENU, self.TestInsert, id=508)
        # EVT_UPDATE_UI is bound on the application object so item 506 is
        # refreshed during idle time.
        wx.GetApp().Bind(wx.EVT_UPDATE_UI, self.TestUpdateUI, id=506)

    # Methods
    def OnMenuHighlight(self, event):
        # Show how to get menu item info from this event handler
        id = event.GetMenuId()
        item = self.GetMenuBar().FindItemById(id)
        if item:
            # NOTE(review): text/help are fetched only to demonstrate the
            # API; they are otherwise unused. GetText is GetItemLabel in
            # newer wxPython — confirm target wx version.
            text = item.GetText()
            help = item.GetHelp()
        # but in this case just call Skip so the default is done
        event.Skip()

    def Menu101(self, event):
        self.log.write('Welcome to Mercury\n')

    def Menu102(self, event):
        self.log.write('Welcome to Venus\n')

    def Menu103(self, event):
        self.log.write('Welcome to the Earth\n')

    def CloseWindow(self, event):
        self.Close()

    def Menu201(self, event):
        self.log.write('Chemical element number 1\n')

    def Menu202(self, event):
        self.log.write('Chemical element number 2\n')

    def Menu2031(self, event):
        self.log.write('Element number 57\n')

    def Menu2032(self, event):
        self.log.write('Element number 58\n')

    def Menu2033(self, event):
        self.log.write('Element number 59\n')

    def Menu301To303(self, event):
        # Shared handler for the radio items; distinguish via the event id.
        id = event.GetId()
        self.log.write('Event id: %d\n' % id)

    def Menu304(self, event):
        self.log.write('Not yet available\n')

    def Menu305(self, event):
        self.log.write('Still vapour\n')

    def Menu401To403(self, event):
        self.log.write('From a EVT_MENU_RANGE event\n')

    def Menu500(self, event):
        self.log.write('Have a happy day!\n')

    def Menu501(self, event):
        self.log.write('Look in the code how the shortcut has been realized\n')

    def Menu502(self, event):
        self.log.write('Hello from Jean-Michel\n')

    def TestRemove(self, evt):
        # Remove item 504 (the "submenu" entry) from the Fun menu, located
        # by walking up from its child item 601.
        mb = self.GetMenuBar()
        submenuItem = mb.FindItemById(601)
        if not submenuItem:
            return
        submenu = submenuItem.GetMenu()
        menu = submenu.GetParent()
        # This works
        #menu.Remove(504)
        # this also works
        # NOTE(review): RemoveItem is deprecated in favour of Remove in
        # newer wxPython — confirm target wx version.
        menu.RemoveItem(mb.FindItemById(504))
        # This doesn't work, as expected since submenuItem is not on menu
        #menu.RemoveItem(submenuItem)

    def TestRemove2(self, evt):
        # Remove the 5th menu ("Fun", index 4) from the menu bar entirely.
        mb = self.GetMenuBar()
        mb.Remove(4)

    def TestUpdateUI(self, evt):
        # Keep item 506's label showing the current time.
        text = time.ctime()
        evt.SetText(text)

    def TestInsert(self, evt):
        theID = 508
        # get the menu
        mb = self.GetMenuBar()
        menuItem = mb.FindItemById(theID)
        menu = menuItem.GetMenu()
        # figure out the position to insert at
        pos = 0
        for i in menu.GetMenuItems():
            if i.GetId() == theID:
                break
            pos += 1
        # now insert the new item
        # NOTE(review): wx.NewId is deprecated (wx.NewIdRef in Phoenix) —
        # confirm target wx version.
        ID = wx.NewId()
        item = wx.MenuItem(menu, ID, "NewItem " + str(ID))
        menu.InsertItem(pos, item)
#---------------------------------------------------------------------------
class TestPanel(wx.Panel):
    """Demo launcher panel: a single button that opens the menu sample."""

    def __init__(self, parent, log):
        self.log = log
        wx.Panel.__init__(self, parent, -1)
        launch_btn = wx.Button(self, -1, "Show the Menu sample", (50, 50))
        self.Bind(wx.EVT_BUTTON, self.OnButton, launch_btn)

    def OnButton(self, evt):
        """Create the sample frame and show it."""
        frame = MyFrame(self, -1, self.log)
        frame.Show(True)
#---------------------------------------------------------------------------
def runTest(frame, nb, log):
    """Entry point called by the wxPython demo framework."""
    return TestPanel(nb, log)
#-------------------------------------------------------------------
overview = """\
A demo of using wx.MenuBar and wx.Menu in various ways.
A menu is a popup (or pull down) list of items, one of which may be selected
before the menu goes away (clicking elsewhere dismisses the menu). Menus may be
used to construct either menu bars or popup menus.
A menu item has an integer ID associated with it which can be used to identify
the selection, or to change the menu item in some way. A menu item with a special
identifier -1 is a separator item and doesn't have an associated command but just
makes a separator line appear in the menu.
Menu items may be either normal items, check items or radio items. Normal items
don't have any special properties while the check items have a boolean flag associated
to them and they show a checkmark in the menu when the flag is set. wxWindows
automatically toggles the flag value when the item is clicked and its value may
be retrieved using either IsChecked method of wx.Menu or wx.MenuBar itself or by
using wxEvent.IsChecked when you get the menu notification for the item in question.
The radio items are similar to the check items except that all the other items
in the same radio group are unchecked when a radio item is checked. The radio group
is formed by a contiguous range of radio items, i.e. it starts at the first item of
this kind and ends with the first item of a different kind (or the end of the menu).
Notice that because the radio groups are defined in terms of the item positions
inserting or removing the items in the menu containing the radio items risks to not
work correctly. Finally note that the radio items are only supported under Windows
and GTK+ currently.
"""
if __name__ == '__main__':
    # Launch this module inside the standard wxPython demo runner.
    import sys,os
    import run
    run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
| |
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import os
import pytz
import requests
import uuid
from mock import patch
from oslo_config import cfg
import six
import six.moves.urllib.parse as urlparse
from storyboard.api.auth import ErrorMessages as e_msg
from storyboard.db.api import access_tokens as token_api
from storyboard.db.api import auth_codes as auth_api
from storyboard.db.api import refresh_tokens
from storyboard.tests import base
CONF = cfg.CONF
class BaseOAuthTest(base.FunctionalTest):
    """Base functional test class, including reusable assertions."""

    def assertValidRedirect(self, response, redirect_uri,
                            expected_status_code, **kwargs):
        """Validate a redirected error response.

        All the URL components of the Location header must match the
        original redirect_uri, except the query string, which must contain
        exactly the parameters given in kwargs.

        :param redirect_uri: The expected redirect_uri.
        :param response: The raw HTTP response.
        :param expected_status_code: The expected status code.
        :param kwargs: Parameters expected in the URI parameters.
        """
        self.assertEqual(expected_status_code, response.status_code)

        # Decompose the actual Location header and the expected redirect
        # URI into their six URL components.
        actual_url = urlparse.urlparse(response.headers.get('Location'))
        expected_url = urlparse.urlparse(redirect_uri)

        # Every component except the query string (index 4) must match; the
        # query string carries the new parameters checked below.
        for component in (0, 1, 2, 3, 5):
            self.assertEqual(expected_url[component], actual_url[component])

        # The query string must contain exactly the expected parameters.
        parameters = urlparse.parse_qs(actual_url[4])
        self.assertEqual(len(kwargs), len(parameters))
        for key, value in six.iteritems(kwargs):
            self.assertIn(key, parameters)
            self.assertIsNotNone(parameters[key])
            self.assertEqual(value, parameters[key][0])
class TestOAuthAuthorize(BaseOAuthTest):
    """Functional tests for our /oauth/authorize endpoint. For more
    information, please see here: http://tools.ietf.org/html/rfc6749

    This is not yet a comprehensive test of this endpoint, though it hits
    the major error cases. Additional work as follows:

    * Test that including a request parameter more than once results in
      invalid_request
    * Test that server errors return with error_description="server_error"
    """

    # A known-good authorization request. Each error test below copies this
    # dict and breaks exactly one parameter.
    valid_params = {
        'response_type': 'code',
        'client_id': 'storyboard.openstack.org',
        'redirect_uri': 'https://storyboard.openstack.org/#!/auth/token',
        'scope': 'user'
    }

    def test_valid_authorize_request(self):
        """This test ensures that the authorize request against the oauth
        endpoint succeeds with expected values.
        """
        random_state = six.text_type(uuid.uuid4())
        # Simple GET with various parameters
        response = self.get_json(path='/openid/authorize',
                                 expect_errors=True,
                                 state=random_state,
                                 **self.valid_params)
        # Assert that this is a redirect response
        self.assertEqual(303, response.status_code)
        # Assert that the redirect request goes to launchpad.
        location = response.headers.get('Location')
        location_url = urlparse.urlparse(location)
        parameters = urlparse.parse_qs(location_url[4])
        # Check the URL
        conf_openid_url = CONF.oauth.openid_url
        self.assertEqual(conf_openid_url, location[0:len(conf_openid_url)])
        # Check OAuth Registration parameters
        self.assertIn('fullname', parameters['openid.sreg.required'][0])
        self.assertIn('email', parameters['openid.sreg.required'][0])
        # Check redirect URL: the state and the original redirect_uri must
        # round-trip through the openid return_to parameter.
        redirect = parameters['openid.return_to'][0]
        redirect_url = urlparse.urlparse(redirect)
        redirect_params = urlparse.parse_qs(redirect_url[4])
        self.assertIn('/openid/authorize_return', redirect)
        self.assertEqual(random_state,
                         redirect_params['state'][0])
        self.assertEqual(self.valid_params['redirect_uri'],
                         redirect_params['sb_redirect_uri'][0])

    def test_authorize_invalid_response_type(self):
        """Assert that an invalid response_type redirects back to the
        redirect_uri and provides the expected error response.
        """
        invalid_params = self.valid_params.copy()
        invalid_params['response_type'] = 'invalid_code'
        # Simple GET with invalid code parameters
        random_state = six.text_type(uuid.uuid4())
        response = self.get_json(path='/openid/authorize',
                                 expect_errors=True,
                                 state=random_state,
                                 **invalid_params)
        # Validate the error response
        self.assertValidRedirect(response=response,
                                 expected_status_code=302,
                                 redirect_uri=invalid_params['redirect_uri'],
                                 error='unsupported_response_type',
                                 error_description=e_msg.INVALID_RESPONSE_TYPE)

    def test_authorize_no_response_type(self):
        """Assert that an nonexistent response_type redirects back to the
        redirect_uri and provides the expected error response.
        """
        invalid_params = self.valid_params.copy()
        del invalid_params['response_type']
        # Simple GET with invalid code parameters
        random_state = six.text_type(uuid.uuid4())
        response = self.get_json(path='/openid/authorize',
                                 expect_errors=True,
                                 state=random_state,
                                 **invalid_params)
        # Validate the error response
        self.assertValidRedirect(response=response,
                                 expected_status_code=302,
                                 redirect_uri=invalid_params['redirect_uri'],
                                 error='unsupported_response_type',
                                 error_description=e_msg.NO_RESPONSE_TYPE)

    def test_authorize_no_client(self):
        """Assert that a nonexistent client redirects back to the
        redirect_uri and provides the expected error response.
        """
        invalid_params = self.valid_params.copy()
        del invalid_params['client_id']
        # Simple GET with invalid code parameters
        random_state = six.text_type(uuid.uuid4())
        response = self.get_json(path='/openid/authorize',
                                 expect_errors=True,
                                 state=random_state,
                                 **invalid_params)
        # Validate the error response
        self.assertValidRedirect(response=response,
                                 expected_status_code=302,
                                 redirect_uri=invalid_params['redirect_uri'],
                                 error='invalid_client',
                                 error_description=e_msg.NO_CLIENT_ID)

    def test_authorize_invalid_client(self):
        """Assert that an invalid client redirects back to the
        redirect_uri and provides the expected error response.
        """
        invalid_params = self.valid_params.copy()
        invalid_params['client_id'] = 'invalid_client'
        # Simple GET with invalid code parameters
        random_state = six.text_type(uuid.uuid4())
        response = self.get_json(path='/openid/authorize',
                                 expect_errors=True,
                                 state=random_state,
                                 **invalid_params)
        # Validate the error response
        self.assertValidRedirect(response=response,
                                 expected_status_code=302,
                                 redirect_uri=invalid_params['redirect_uri'],
                                 error='unauthorized_client',
                                 error_description=e_msg.INVALID_CLIENT_ID)

    def test_authorize_invalid_scope(self):
        """Assert that an invalid scope redirects back to the
        redirect_uri and provides the expected error response.
        """
        invalid_params = self.valid_params.copy()
        invalid_params['scope'] = 'invalid_scope'
        # Simple GET with invalid code parameters
        random_state = six.text_type(uuid.uuid4())
        response = self.get_json(path='/openid/authorize',
                                 expect_errors=True,
                                 state=random_state,
                                 **invalid_params)
        # Validate the error response
        self.assertValidRedirect(response=response,
                                 expected_status_code=302,
                                 redirect_uri=invalid_params['redirect_uri'],
                                 error='invalid_scope',
                                 error_description=e_msg.INVALID_SCOPE)

    def test_authorize_no_scope(self):
        """Assert that a nonexistent scope redirects back to the
        redirect_uri and provides the expected error response.
        """
        invalid_params = self.valid_params.copy()
        del invalid_params['scope']
        # Simple GET with invalid code parameters
        random_state = six.text_type(uuid.uuid4())
        response = self.get_json(path='/openid/authorize',
                                 expect_errors=True,
                                 state=random_state,
                                 **invalid_params)
        # Validate the error response
        self.assertValidRedirect(response=response,
                                 expected_status_code=302,
                                 redirect_uri=invalid_params['redirect_uri'],
                                 error='invalid_scope',
                                 error_description=e_msg.NO_SCOPE)

    def test_authorize_invalid_redirect_uri(self):
        """Assert that an invalid redirect_uri returns a 400 message with the
        appropriate error message encoded in the body of the response.
        """
        invalid_params = self.valid_params.copy()
        invalid_params['redirect_uri'] = 'not_a_valid_uri'
        # Simple GET with invalid code parameters
        random_state = six.text_type(uuid.uuid4())
        response = self.get_json(path='/openid/authorize',
                                 expect_errors=True,
                                 state=random_state,
                                 **invalid_params)
        # Assert that this is NOT a redirect: with a broken redirect_uri
        # there is nowhere safe to send the error, so it comes back directly.
        self.assertEqual(400, response.status_code)
        self.assertIsNotNone(response.json)
        self.assertEqual('invalid_request', response.json['error'])
        self.assertEqual(e_msg.INVALID_REDIRECT_URI,
                         response.json['error_description'])

    def test_authorize_no_redirect_uri(self):
        """Assert that a nonexistent redirect_uri returns a 400 message with
        the appropriate error message encoded in the body of the response.
        """
        invalid_params = self.valid_params.copy()
        del invalid_params['redirect_uri']
        # Simple GET with invalid code parameters
        random_state = six.text_type(uuid.uuid4())
        response = self.get_json(path='/openid/authorize',
                                 expect_errors=True,
                                 state=random_state,
                                 **invalid_params)
        # Assert that this is NOT a redirect
        self.assertEqual(400, response.status_code)
        self.assertIsNotNone(response.json)
        self.assertEqual('invalid_request', response.json['error'])
        self.assertEqual(e_msg.NO_REDIRECT_URI,
                         response.json['error_description'])
@patch.object(requests, 'post')
class TestOAuthAuthorizeReturn(BaseOAuthTest):
"""Functional tests for our /oauth/authorize_return, which handles
responses from the launchpad service. The expected behavior here is that
a successful response will 303 back to the client in accordance with
the OAuth Authorization Response as described here:
http://tools.ietf.org/html/rfc6749#section-4.1.2
Errors from launchpad should be recast into the appropriate error code
and follow the error responses in the same section.
"""
valid_params = {
'response_type': 'code',
'client_id': 'storyboard.openstack.org',
'sb_redirect_uri': 'https://storyboard.openstack.org/!#/auth/token',
'scope': 'user',
'openid.assoc_handle': '{HMAC-SHA1}{54d11f3f}{lmmpZg==}',
'openid.ax.count.Email': 0,
'openid.ax.type.Email': 'http://schema.openid.net/contact/email',
'openid.ax.count.FirstName': 0,
'openid.ax.type.FirstName': 'http://schema.openid.net/namePerson'
'/first',
'openid.ax.count.LastName': 0,
'openid.ax.type.LastName': 'http://schema.openid.net/namePerson'
'/last',
'openid.ax.mode': 'fetch_response',
# These two would usually be the OpenID URI.
'openid.claimed_id': 'regularuser_openid',
'openid.identity': 'regularuser_openid',
'openid.mode': 'id_res',
"openid.ns": "http://specs.openid.net/auth/2.0",
"openid.ns.ax": "http://openid.net/srv/ax/1.0",
"openid.ns.sreg": "http://openid.net/sreg/1.0",
"openid.op_endpoint": "https://login.launchpad.net/+openid",
"openid.response_nonce": "2015-02-03T19:19:27ZY5SIfO",
"openid.return_to": "https://storyboard.openstack.org/api/v1/openid"
"/authorize_return?scope=user",
"openid.sig=2ghVIBuCYDFe32cMOvY9rTCsQfg": "",
"openid.signed": "assoc_handle,ax.count.Email,ax.count.FirstName,"
"ax.count.LastName,ax.mode,ax.type.Email,"
"ax.type.FirstName,ax.type.LastName,claimed_id,"
"identity,mode,ns,ns.ax,ns.sreg,op_endpoint,"
"response_nonce,return_to,signed,sreg.email,"
"sreg.fullname",
"openid.sreg.email": "test@example.com",
"openid.sreg.fullname": "Test User",
}
def _mock_response(self, mock_post, valid=True):
"""Set the mock response from the openid endpoint to either true or
false.
:param mock_post: The mock to decorate.
:param valid: Whether to provide a valid or invalid response.
:return:
"""
mock_post.return_value.status_code = 200
if valid:
mock_post.return_value.content = \
'is_valid:true\nns:http://specs.openid.net/auth/2.0\n'
else:
mock_post.return_value.content = \
'is_valid:false\nns:http://specs.openid.net/auth/2.0\n'
def test_valid_response_request(self, mock_post):
    """This test ensures that the authorize request against the oauth
    endpoint succeeds with expected values.
    """
    # Make the upstream OpenID provider report the assertion as valid.
    self._mock_response(mock_post, valid=True)
    random_state = six.text_type(uuid.uuid4())
    # Simple GET with various parameters
    response = self.get_json(path='/openid/authorize_return',
                             expect_errors=True,
                             state=random_state,
                             **self.valid_params)
    # Try to pull the code out of the response
    location = response.headers.get('Location')
    location_url = urlparse.urlparse(location)
    # Index 4 of the parse result is the query string.
    parameters = urlparse.parse_qs(location_url[4])
    with base.HybridSessionManager():
        token = auth_api.authorization_code_get(parameters['code'])
    redirect_uri = self.valid_params['sb_redirect_uri']
    # Validate the redirect response
    self.assertValidRedirect(response=response,
                             expected_status_code=302,
                             redirect_uri=redirect_uri,
                             state=token.state,
                             code=token.code)
def test_invalid_response_request(self, mock_post):
    """This test ensures that a failed authorize request against the oauth
    endpoint succeeds with expected values.
    """
    # The upstream OpenID provider rejects the assertion.
    self._mock_response(mock_post, valid=False)
    state = six.text_type(uuid.uuid4())
    response = self.get_json(path='/openid/authorize_return',
                             expect_errors=True,
                             state=state,
                             **self.valid_params)
    # The user must be redirected back with an access_denied error.
    self.assertValidRedirect(
        response=response,
        expected_status_code=302,
        redirect_uri=self.valid_params['sb_redirect_uri'],
        error='access_denied',
        error_description=e_msg.OPEN_ID_TOKEN_INVALID)
def test_invalid_redirect_no_name(self, mock_post):
    """If the oauth response to storyboard is valid, but does not include a
    first name, it should error.
    """
    self._mock_response(mock_post, valid=True)
    # Strip the fullname field from an otherwise valid request.
    params = self.valid_params.copy()
    del params['openid.sreg.fullname']
    response = self.get_json(path='/openid/authorize_return',
                             expect_errors=True,
                             state=six.text_type(uuid.uuid4()),
                             **params)
    # The user must be redirected back with an invalid_request error.
    self.assertValidRedirect(
        response=response,
        expected_status_code=302,
        redirect_uri=self.valid_params['sb_redirect_uri'],
        error='invalid_request',
        error_description=e_msg.INVALID_NO_NAME)
def test_invalid_redirect_no_email(self, mock_post):
    """If the oauth response to storyboard is valid, but does not include an
    email address, it should error.
    """
    self._mock_response(mock_post, valid=True)
    random_state = six.text_type(uuid.uuid4())
    # Strip the email field from an otherwise valid request.
    invalid_params = self.valid_params.copy()
    del invalid_params['openid.sreg.email']
    # Simple GET with various parameters
    response = self.get_json(path='/openid/authorize_return',
                             expect_errors=True,
                             state=random_state,
                             **invalid_params)
    redirect_uri = self.valid_params['sb_redirect_uri']
    # Validate the redirect response
    self.assertValidRedirect(response=response,
                             expected_status_code=302,
                             redirect_uri=redirect_uri,
                             error='invalid_request',
                             error_description=e_msg.INVALID_NO_EMAIL)
class TestOAuthAccessToken(BaseOAuthTest):
    """Functional test for the /oauth/token endpoint for the generation of
    access tokens.
    """
    # Fixed-offset zone names used to run the token tests under many
    # different host timezones (see the TZ-override tests below).
    tested_timezones = [
        'Etc/GMT',
        'Etc/GMT+0',
        'Etc/GMT+1',
        'Etc/GMT+10',
        'Etc/GMT+11',
        'Etc/GMT+12',
        'Etc/GMT+2',
        'Etc/GMT+3',
        'Etc/GMT+4',
        'Etc/GMT+5',
        'Etc/GMT+6',
        'Etc/GMT+7',
        'Etc/GMT+8',
        'Etc/GMT+9',
        'Etc/GMT-0',
        'Etc/GMT-1',
        'Etc/GMT-10',
        'Etc/GMT-11',
        'Etc/GMT-12',
        'Etc/GMT-13',
        'Etc/GMT-14',
        'Etc/GMT-2',
        'Etc/GMT-3',
        'Etc/GMT-4',
        'Etc/GMT-5',
        'Etc/GMT-6',
        'Etc/GMT-7',
        'Etc/GMT-8',
        'Etc/GMT-9',
    ]
def test_valid_access_request(self):
    """This test ensures that the access token request may execute
    properly with a valid token.
    """
    # Generate a valid auth token
    with base.HybridSessionManager():
        authorization_code = auth_api.authorization_code_save({
            'user_id': 2,
            'state': 'test_state',
            'code': 'test_valid_code'
        })
    content_type = 'application/x-www-form-urlencoded'
    # POST with content: application/x-www-form-urlencoded
    response = self.app.post('/v1/openid/token',
                             params={
                                 'code': authorization_code.code,
                                 'grant_type': 'authorization_code'
                             },
                             content_type=content_type,
                             expect_errors=True)
    # Assert that this is a successful response
    self.assertEqual(200, response.status_code)
    # Assert that the token came back in the response
    token = response.json
    self.assertIsNotNone(token['access_token'])
    self.assertIsNotNone(token['expires_in'])
    self.assertIsNotNone(token['id_token'])
    self.assertIsNotNone(token['refresh_token'])
    self.assertIsNotNone(token['token_type'])
    self.assertEqual('Bearer', token['token_type'])
    # Assert that the access token is in the database
    with base.HybridSessionManager():
        access_token = \
            token_api.access_token_get_by_token(token['access_token'])
    self.assertIsNotNone(access_token)
    # Assert that system configured values is owned by the correct user.
    self.assertEqual(2, access_token.user_id)
    # NOTE(review): 'id_token' is compared against the user id here —
    # presumably the endpoint returns the user's id as id_token; confirm.
    self.assertEqual(token['id_token'], access_token.user_id)
    self.assertEqual(token['expires_in'], CONF.oauth.access_token_ttl)
    self.assertEqual(token['expires_in'], access_token.expires_in)
    self.assertEqual(token['access_token'], access_token.access_token)
    # Assert that the refresh token is in the database
    with base.HybridSessionManager():
        refresh_token = \
            refresh_tokens.refresh_token_get_by_token(
                token['refresh_token'])
    self.assertIsNotNone(refresh_token)
    # Assert that system configured values is owned by the correct user.
    self.assertEqual(2, refresh_token.user_id)
    self.assertEqual(CONF.oauth.refresh_token_ttl,
                     refresh_token.expires_in)
    self.assertEqual(token['refresh_token'], refresh_token.refresh_token)
    # Assert that the authorization code is no longer in the database.
    with base.HybridSessionManager():
        none_code = \
            auth_api.authorization_code_get(authorization_code.code)
    self.assertIsNone(none_code)
def test_valid_access_token_time(self):
    """Assert that a newly created access token is valid if storyboard is
    installed in a multitude of timezones.
    """
    content_type = 'application/x-www-form-urlencoded'
    # Remember the host's TZ setting so it can be restored afterwards.
    saved_tz = os.environ.get('TZ')
    # Exercise the token exchange under every tested timezone.
    for zone in self.tested_timezones:
        os.environ['TZ'] = zone
        # Create a fresh authorization code for this iteration.
        with base.HybridSessionManager():
            auth_code = auth_api.authorization_code_save({
                'user_id': 2,
                'state': 'test_state',
                'code': 'test_valid_code',
                'expires_in': 300
            })
        token_response = self.app.post('/v1/openid/token',
                                       params={
                                           'code': auth_code.code,
                                           'grant_type':
                                               'authorization_code'
                                       },
                                       content_type=content_type,
                                       expect_errors=True)
        # The exchange must succeed regardless of the local timezone.
        self.assertEqual(200, token_response.status_code)
    # Restore the original TZ environment.
    if saved_tz:
        os.environ['TZ'] = saved_tz
    else:
        del os.environ['TZ']
def test_expired_access_token_time(self):
    """This test ensures that an access token is seen as expired if
    storyboard is installed in multiple timezones.
    """
    # The code below is saved with expires_in=300 (5 minutes), so a
    # creation time 6 minutes in the past makes it already expired.
    expired = datetime.datetime.now(pytz.utc) - datetime.timedelta(
        minutes=6)
    # Store the old TZ info, if it exists.
    old_tz = None
    if 'TZ' in os.environ:
        old_tz = os.environ['TZ']
    # Convert now into every possible timezone out there :)
    for name in self.tested_timezones:
        # Override the 'default timezone' for the current runtime.
        os.environ['TZ'] = name
        # Create a token.
        with base.HybridSessionManager():
            authorization_code = auth_api.authorization_code_save({
                'user_id': 2,
                'state': 'test_state',
                'code': 'test_valid_code',
                'expires_in': 300,
                'created_at': expired
            })
        content_type = 'application/x-www-form-urlencoded'
        # POST with content: application/x-www-form-urlencoded
        response = self.app.post('/v1/openid/token',
                                 params={
                                     'code': authorization_code.code,
                                     'grant_type': 'authorization_code'
                                 },
                                 content_type=content_type,
                                 expect_errors=True)
        # The expired code must be rejected with 401.
        self.assertEqual(401, response.status_code)
    # Reset the timezone.
    if old_tz:
        os.environ['TZ'] = old_tz
    else:
        del os.environ['TZ']
def test_invalid_grant_type(self):
    """This test ensures that invalid grant_type parameters get the
    appropriate error response.
    """
    # A real authorization code exists, but the grant type is bogus.
    with base.HybridSessionManager():
        auth_code = auth_api.authorization_code_save({
            'user_id': 2,
            'state': 'test_state',
            'code': 'test_valid_code',
            'expires_in': 300
        })
    response = self.app.post('/v1/openid/token',
                             params={
                                 'code': auth_code.code,
                                 'grant_type': 'invalid_grant_type'
                             },
                             content_type='application/x-www-form-urlencoded',
                             expect_errors=True)
    # The server must reject the unsupported grant type with a 400.
    self.assertEqual(400, response.status_code)
    self.assertIsNotNone(response.json)
    self.assertEqual('unsupported_grant_type', response.json['error'])
    self.assertEqual(e_msg.INVALID_TOKEN_GRANT_TYPE,
                     response.json['error_description'])
def test_invalid_access_token(self):
    """This test ensures that a request with an unknown authorization code
    and an unsupported grant_type gets the appropriate error response.
    """
    content_type = 'application/x-www-form-urlencoded'
    # POST with content: application/x-www-form-urlencoded
    response = self.app.post('/v1/openid/token',
                             params={
                                 'code': 'invalid_access_token',
                                 'grant_type': 'invalid_grant_type'
                             },
                             content_type=content_type,
                             expect_errors=True)
    # The request is rejected for the unsupported grant type.
    self.assertEqual(400, response.status_code)
    self.assertIsNotNone(response.json)
    self.assertEqual('unsupported_grant_type', response.json['error'])
    self.assertEqual(e_msg.INVALID_TOKEN_GRANT_TYPE,
                     response.json['error_description'])
def test_valid_refresh_token(self):
    """This test ensures that a valid refresh token can be converted into
    a valid access token, and cleans up after itself.
    """
    # Generate a valid access code
    with base.HybridSessionManager():
        authorization_code = auth_api.authorization_code_save({
            'user_id': 2,
            'state': 'test_state',
            'code': 'test_valid_code'
        })
    content_type = 'application/x-www-form-urlencoded'
    # Generate an auth and a refresh token.
    resp_1 = self.app.post('/v1/openid/token',
                           params={
                               'code': authorization_code.code,
                               'grant_type': 'authorization_code'
                           },
                           content_type=content_type,
                           expect_errors=True)
    # Assert that this is a successful response
    self.assertEqual(200, resp_1.status_code)
    # Assert that the token came back in the response
    t1 = resp_1.json
    # Assert that both are in the database.
    with base.HybridSessionManager():
        access_token = \
            token_api.access_token_get_by_token(t1['access_token'])
    self.assertIsNotNone(access_token)
    with base.HybridSessionManager():
        refresh_token = refresh_tokens.refresh_token_get_by_token(
            t1['refresh_token'])
    self.assertIsNotNone(refresh_token)
    content_type = 'application/x-www-form-urlencoded'
    # Issue a refresh token request.
    resp_2 = self.app.post('/v1/openid/token',
                           params={
                               'refresh_token': t1['refresh_token'],
                               'grant_type': 'refresh_token'
                           },
                           content_type=content_type,
                           expect_errors=True)
    # Assert that the response is good.
    self.assertEqual(200, resp_2.status_code)
    # Assert that the token came back in the response
    t2 = resp_2.json
    self.assertIsNotNone(t2['access_token'])
    self.assertIsNotNone(t2['expires_in'])
    self.assertIsNotNone(t2['id_token'])
    self.assertIsNotNone(t2['refresh_token'])
    self.assertIsNotNone(t2['token_type'])
    self.assertEqual('Bearer', t2['token_type'])
    # Assert that the access token is in the database
    with base.HybridSessionManager():
        new_access_token = \
            token_api.access_token_get_by_token(t2['access_token'])
    self.assertIsNotNone(new_access_token)
    # Assert that system configured values is owned by the correct user.
    self.assertEqual(2, new_access_token.user_id)
    # NOTE(review): as in test_valid_access_request, 'id_token' is
    # expected to equal the user id — confirm against the endpoint.
    self.assertEqual(t2['id_token'], new_access_token.user_id)
    self.assertEqual(t2['expires_in'], CONF.oauth.access_token_ttl)
    self.assertEqual(t2['expires_in'], new_access_token.expires_in)
    self.assertEqual(t2['access_token'],
                     new_access_token.access_token)
    # Assert that the refresh token is in the database
    with base.HybridSessionManager():
        new_refresh_token = refresh_tokens.refresh_token_get_by_token(
            t2['refresh_token'])
    self.assertIsNotNone(new_refresh_token)
    # Assert that system configured values is owned by the correct user.
    self.assertEqual(2, new_refresh_token.user_id)
    self.assertEqual(CONF.oauth.refresh_token_ttl,
                     new_refresh_token.expires_in)
    self.assertEqual(t2['refresh_token'],
                     new_refresh_token.refresh_token)
    # Assert that the old access tokens are no longer in the database and
    # have been cleaned up.
    with base.HybridSessionManager():
        no_access_token = \
            token_api.access_token_get_by_token(t1['access_token'])
    with base.HybridSessionManager():
        no_refresh_token = \
            refresh_tokens.refresh_token_get_by_token(t1['refresh_token'])
    self.assertIsNone(no_refresh_token)
    self.assertIsNone(no_access_token)
def test_invalid_refresh_token(self):
    """Assert that an unknown refresh token cannot be exchanged for an
    access token.
    """
    # Attempt the refresh grant with a token the server has never issued.
    response = self.app.post('/v1/openid/token',
                             params={
                                 'refresh_token': 'invalid_refresh_token',
                                 'grant_type': 'refresh_token'
                             },
                             content_type='application/x-www-form-urlencoded',
                             expect_errors=True)
    # The server must answer 401 with an invalid_grant error body.
    self.assertEqual(401, response.status_code)
    self.assertIsNotNone(response.json)
    self.assertEqual('invalid_grant', response.json['error'])
| |
from bcrypt import gensalt
from bcrypt import hashpw
from datetime import date
from datetime import datetime
from functools import reduce
from sqlalchemy import BigInteger
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy import Date
from sqlalchemy import DateTime
from sqlalchemy import Enum
from sqlalchemy import Float
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import Time
from sqlalchemy import UniqueConstraint
from sqlalchemy import or_
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from zope.sqlalchemy import ZopeTransactionExtension
# Thread-local session factory; ZopeTransactionExtension ties session
# lifetime to the Zope transaction manager.
DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
# Declarative base class shared by all model classes below.
Base = declarative_base()
class User(Base):
    """A user on the site."""
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    username = Column(String(32), unique=True, nullable=False)
    password = Column(String(60), nullable=False)  # 60 is length of bcrypt hashes
    # We could use an unsigned integer here, but it's deprecated
    signup_ip = Column(BigInteger, nullable=False)  # TODO(2012-10-27) support IPv6
    time_created = Column(DateTime, nullable=False, default=datetime.utcnow)

    def __init__(self, username, password, signup_ip):
        """Create a user, hashing the password and packing the signup IP.

        :param password: Plain-text password; stored bcrypt-hashed.
        :param signup_ip: Either an already-packed int, or a dotted-quad
            IPv4 string.
        """
        self.username = username
        # NOTE(review): bcrypt's hashpw requires bytes on Python 3 —
        # presumably this code targets Python 2; confirm before porting.
        self.password = hashpw(password, gensalt())
        if isinstance(signup_ip, int):
            self.signup_ip = signup_ip
        else:
            # Pack 'a.b.c.d' into one integer, most significant octet first.
            self.signup_ip = reduce(lambda a, b: a << 8 | b, map(int, signup_ip.split('.')))
class UserEmail(Base):
    """An email address that's attached to a user account."""
    __tablename__ = 'user_email'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
    email = Column(String(255), unique=True, nullable=False)
    time_created = Column(DateTime, nullable=False, default=datetime.utcnow)

    def __init__(self, user, email):
        """Attach an email address to a user.

        :param user: Either the user's integer id, or a username string to
            look up in the user table.
        """
        if isinstance(user, int):
            self.user_id = user
        elif isinstance(user, str) or isinstance(user, bytes):
            # NOTE(review): .first() returns None for an unknown username,
            # which would raise AttributeError on the next line — confirm
            # callers always pass an existing username.  Also note that an
            # argument of any other type leaves user_id unset.
            user_id = DBSession.query(User.id).filter_by(username=user).first()
            self.user_id = user_id.id
        self.email = email
class State(Base):
    """U.S. state or Canadian province, territory, capitol, or other."""
    __tablename__ = 'state'
    id = Column(Integer, primary_key=True)
    name = Column(String(255), nullable=False)
    abbreviation = Column(String(255), nullable=False)
    # Free-form classification (e.g. state vs. province vs. territory);
    # exact vocabulary is determined by the data loader, not this model.
    type = Column(String(255), nullable=False)
def get_park_types():
    """Return a mapping of park type abbreviation to full type name."""
    # NOTE: 'National Battlefied Site' reproduces the original (typo'd)
    # value verbatim; these strings feed the park_type DB enum, so the
    # spelling is preserved deliberately.
    return {
        'IHS': 'International Historic Site',
        'NB': 'National Battlefield',
        'NBP': 'National Battlefield Park',
        'NBS': 'National Battlefied Site',
        'NHA': 'National Heritage Area',  # Not in the stamp book
        'NHC': 'National Heritage Corridor',  # Not in the stamp book
        'NHP': 'National Historical Park',
        'NHP & EP': 'National Historical Park and Ecological Preserve',
        'NHP & PRES': 'National Historical Park and Preserve',
        'NH RES': 'National Historical Reserve',
        'NHS': 'National Historic Site',
        'NHT': 'National Historic Trail',
        'NL': 'National Lakeshore',
        'NM': 'National Monument',
        'NM & PRES': 'National Monument and Preserve',
        'NMP': 'National Military Park',
        'N MEM': 'National Memorial',
        'NP': 'National Park',
        'N & SP': 'National and State Parks',
        'NP & PRES': 'National Park and Preserve',
        'N PRES': 'National Preserve',
        'NR': 'National River',
        'NRA': 'National Recreation Area',
        'NRR': 'National Recreation River',
        'NRRA': 'National River and Recreation Area',
        'N RES': 'National Reserve',
        'NS': 'National Seashore',
        'NSR': 'National Scenic River or Riverway',
        'NST': 'National Scenic Trail',
        'PKWY': 'Parkway',
        'SRR': 'Scenic and Recreational River',
        'WR': 'Wild River',
        'WSR': 'Wild and Scenic River',
    }
class Park(Base):
    """A park, national historic landmark, or other area administered by the
    National Park Service.
    """
    __tablename__ = 'park'
    # Bug fix: the two CheckConstraints below were previously bare
    # expressions in the class body, so they were never attached to the
    # table, and the longitude range was wrongly limited to [-90, 90].
    # They are now attached via __table_args__, with longitude spanning
    # [-180, 180] to match StampLocation.
    __table_args__ = (
        CheckConstraint('latitude >= -90 and latitude <= 90'),
        CheckConstraint('longitude >= -180 and longitude <= 180'),
    )
    id = Column(Integer, primary_key=True)
    name = Column(String(255), nullable=False, unique=True)
    url = Column(String(255), nullable=False, unique=True)
    state_id = Column(Integer, ForeignKey('state.id'), nullable=False)
    latitude = Column(Float)
    longitude = Column(Float)
    time_created = Column(DateTime, nullable=False, default=datetime.utcnow)
    date_founded = Column(Date)
    region = Column(Enum('NA', 'MA', 'NC', 'SE', 'MW', 'SW', 'RM', 'W', 'PNWA', name='region'), nullable=False)
    # Enum values come from the abbreviation keys of get_park_types().
    type = Column(Enum(*get_park_types().keys(), name='park_type'))
    added_by_user_id = Column(Integer, ForeignKey('user.id'))

    def __init__(self, name, state, url, region, type, latitude=None, longitude=None, date_founded=None):
        """Create a park.

        :param state: Either the state's integer id, or a string matched
            against both the state name and abbreviation.
        """
        self.name = name
        if isinstance(state, int):
            self.state_id = state
        elif isinstance(state, str) or isinstance(state, bytes):
            # Try both the name and the abbreviation
            self.state_id = DBSession.query(
                State.id
            ).filter(
                or_(
                    State.abbreviation == state,
                    State.name == state
                )
            ).scalar()
        self.url = url
        self.region = region
        self.type = type
        self.latitude = latitude
        self.longitude = longitude
        self.date_founded = date_founded
class Stamp(Base):
    """A passport park stamp."""
    __tablename__ = 'stamp'
    id = Column(Integer, primary_key=True)
    # The text imprinted by the stamp.
    text = Column(String(255), nullable=False)
    time_created = Column(DateTime, nullable=False, default=datetime.utcnow)
    # Automatically bumped on row updates.
    time_updated = Column(DateTime, nullable=True, onupdate=datetime.utcnow)
    type = Column(Enum('normal', 'bonus', name='stamp_type'), nullable=False)
    # Lifecycle status of the stamp.
    status = Column(Enum('active', 'lost', 'archived', name='stamp_status'), nullable=False)
    added_by_user_id = Column(Integer, ForeignKey('user.id'))
class StampHistory(Base):
    """A history of stamp edits."""
    # TODO(bskari|2013-08-24) This should be stored in a log or something
    # instead of the database
    __tablename__ = 'stamp_history'
    id = Column(Integer, primary_key=True)
    stamp_id = Column(Integer, ForeignKey('stamp.id'))
    # Snapshot of the stamp's fields at edit time.
    text = Column(String(255), nullable=False)
    time_created = Column(DateTime, nullable=False, default=datetime.utcnow)
    type = Column(Enum('normal', 'bonus', name='stamp_history_type'), nullable=False)
    status = Column(Enum('active', 'lost', 'archived', name='stamp_status_type'), nullable=False)
    edited_by_user_id = Column(Integer, ForeignKey('user.id'))
class StampCollection(Base):
    """A user has recorded a collection of a stamp."""
    __tablename__ = 'stamp_collection'
    id = Column(Integer, primary_key=True)
    # I could just record the StampToLocation id, but then I would have to
    # use a flag for 'active' so that I could remove StampToLocation entries
    # when stamps are lost or removed from a location. Recording the stamp_id
    # and park_id seems like a cleaner plan.
    stamp_id = Column(Integer, ForeignKey('stamp.id'), nullable=False)
    park_id = Column(Integer, ForeignKey('park.id'), nullable=False)
    user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
    # Defaults to the day the row is created, not the stamp's own date.
    date_collected = Column(Date, nullable=False, default=date.today)
    time_created = Column(DateTime, nullable=False, default=datetime.utcnow)
# Secondary indexes on the collection table's foreign keys.
Index('idx_user_id', StampCollection.user_id)
Index('idx_stamp_id', StampCollection.stamp_id)
class StampLocation(Base):
    """A location where stamps can be collected, e.g. a visitor's center."""
    __tablename__ = 'stamp_location'
    # Bug fix: these CheckConstraints were previously bare expressions in
    # the class body, so they were never attached to the table; attach
    # them through __table_args__.
    __table_args__ = (
        CheckConstraint('latitude >= -90 and latitude <= 90'),
        CheckConstraint('longitude >= -180 and longitude <= 180'),
    )
    id = Column(Integer, primary_key=True)
    park_id = Column(Integer, ForeignKey('park.id'), nullable=False)
    description = Column(String(255), nullable=False)
    address = Column(String(255))
    latitude = Column(Float)
    longitude = Column(Float)
    added_by_user_id = Column(Integer, ForeignKey('user.id'))
    time_created = Column(DateTime, nullable=False, default=datetime.utcnow)
# Index by longitude first because the US is more wide than it is tall.
# I don't know if that will even matter, considering that longitude is
# probably going to be unique anyway.
Index('idx_longitude_latitude', StampLocation.longitude, StampLocation.latitude)
class StampToLocation(Base):
    """One stamp can be at multiple locations. This is a junction table that
    stores that relationship.
    """
    __tablename__ = 'stamp_at_location'
    # Bug fix: the UniqueConstraint was previously a bare expression in the
    # class body, so it was never attached to the table; attach it through
    # __table_args__ so a stamp cannot be listed twice at one location.
    __table_args__ = (
        UniqueConstraint('stamp_id', 'stamp_location_id'),
    )
    id = Column(Integer, primary_key=True)
    stamp_id = Column(
        Integer,
        ForeignKey('stamp.id'),
        nullable=False,
    )
    stamp_location_id = Column(
        Integer,
        ForeignKey('stamp_location.id'),
        nullable=False,
    )
    time_created = Column(DateTime, nullable=False, default=datetime.utcnow)
class OperatingHours(Base):
    """The hours that a particular location are open for."""
    __tablename__ = 'operating_hours'
    # Bug fix: the UniqueConstraint was previously a bare expression in the
    # class body, so it was never attached to the table; attach it through
    # __table_args__.
    # This might need to be relaxed if a place closes mid day
    __table_args__ = (
        UniqueConstraint('location_id', 'day_of_week'),
    )
    id = Column(Integer, primary_key=True)
    location_id = Column(
        Integer,
        ForeignKey('stamp_location.id'),
        nullable=False,
    )
    day_of_week = Column(
        Enum(
            'Sunday',
            'Monday',
            'Tuesday',
            'Wednesday',
            'Thursday',
            'Friday',
            'Saturday',
            name='weekday',
        ),
        nullable=False
    )
    # For example, a place that is open 09:00-17:00 would have:
    # time_open: 09:00
    # minutes: 480
    # A place that is open 18:00-02:00 would have:
    # time_open: 18:00
    # minutes: 480
    time_open = Column(Time, nullable=False)
    # Number of minutes the location stays open after time_open.
    minutes = Column(Integer, nullable=False)
| |
""" ietf_restconf_monitoring
This module contains monitoring information for the
RESTCONF protocol.
Copyright (c) 2016 IETF Trust and the persons identified as
authors of the code. All rights reserved.
Redistribution and use in source and binary forms, with or
without modification, is permitted pursuant to, and subject
to the license terms contained in, the Simplified BSD License
set forth in Section 4.c of the IETF Trust's Legal Provisions
Relating to IETF Documents
(http\://trustee.ietf.org/license\-info).
This version of this YANG module is part of RFC XXXX; see
the RFC itself for full legal notices.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class RestconfState(object):
    """
    Contains RESTCONF protocol monitoring information.
    .. attribute:: capabilities
    Contains a list of protocol capability URIs
    **type**\: :py:class:`Capabilities <ydk.models.ietf.ietf_restconf_monitoring.RestconfState.Capabilities>`
    .. attribute:: streams
    Container representing the notification event streams supported by the server
    **type**\: :py:class:`Streams <ydk.models.ietf.ietf_restconf_monitoring.RestconfState.Streams>`
    """
    # YANG module prefix and revision this binding was generated from.
    _prefix = 'rcmon'
    _revision = '2016-08-15'

    def __init__(self):
        # Child containers; the 'parent' back-references let nested nodes
        # build their XPath via the parent's _common_path.
        self.capabilities = RestconfState.Capabilities()
        self.capabilities.parent = self
        self.streams = RestconfState.Streams()
        self.streams.parent = self
class Capabilities(object):
    """
    Contains a list of protocol capability URIs
    .. attribute:: capability
    A RESTCONF protocol capability URI
    **type**\: list of str
    """
    _prefix = 'rcmon'
    _revision = '2016-08-15'

    def __init__(self):
        self.parent = None
        # Leaf-list of capability URI strings.
        self.capability = YLeafList()
        self.capability.parent = self
        self.capability.name = 'capability'

    @property
    def _common_path(self):
        # Top-level container: the path is absolute, no parent needed.
        return '/ietf-restconf-monitoring:restconf-state/ietf-restconf-monitoring:capabilities'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # True when at least one capability entry is populated.
        if self.capability is not None:
            for child in self.capability:
                if child is not None:
                    return True
        return False

    @staticmethod
    def _meta_info():
        # Deferred import avoids a circular dependency with the meta module.
        from ydk.models.ietf._meta import _ietf_restconf_monitoring as meta
        return meta._meta_table['RestconfState.Capabilities']['meta_info']
class Streams(object):
    """
    Container representing the notification event streams
    supported by the server.
    .. attribute:: stream
    Each entry describes an event stream supported by the server
    **type**\: list of :py:class:`Stream <ydk.models.ietf.ietf_restconf_monitoring.RestconfState.Streams.Stream>`
    """
    _prefix = 'rcmon'
    _revision = '2016-08-15'

    def __init__(self):
        self.parent = None
        # YANG list of stream entries, keyed by 'name'.
        self.stream = YList()
        self.stream.parent = self
        self.stream.name = 'stream'
class Stream(object):
    """
    Each entry describes an event stream supported by
    the server.
    .. attribute:: name <key>
    The stream name
    **type**\: str
    .. attribute:: access
    The server will create an entry in this list for each encoding format that is supported for this stream. The media type 'text/event\-stream' is expected for all event streams. This list identifies the sub\-types supported for this stream
    **type**\: list of :py:class:`Access <ydk.models.ietf.ietf_restconf_monitoring.RestconfState.Streams.Stream.Access>`
    .. attribute:: description
    Description of stream content
    **type**\: str
    .. attribute:: replay_log_creation_time
    Indicates the time the replay log for this stream was created
    **type**\: str
    **pattern:** \\d{4}\-\\d{2}\-\\d{2}T\\d{2}\:\\d{2}\:\\d{2}(\\.\\d+)?(Z\|[\\+\\\-]\\d{2}\:\\d{2})
    .. attribute:: replay_support
    Indicates if replay buffer supported for this stream. If 'true', then the server MUST support the 'start\-time' and 'stop\-time' query parameters for this stream
    **type**\: bool
    **default value**\: false
    """
    _prefix = 'rcmon'
    _revision = '2016-08-15'

    def __init__(self):
        self.parent = None
        # 'name' is the YANG list key for this entry.
        self.name = None
        self.access = YList()
        self.access.parent = self
        self.access.name = 'access'
        self.description = None
        self.replay_log_creation_time = None
        self.replay_support = None
class Access(object):
    """
    The server will create an entry in this list for each
    encoding format that is supported for this stream.
    The media type 'text/event\-stream' is expected
    for all event streams. This list identifies the
    sub\-types supported for this stream.
    .. attribute:: encoding <key>
    This is the secondary encoding format within the 'text/event\-stream' encoding used by all streams. The type 'xml' is supported for XML encoding. The type 'json' is supported for JSON encoding
    **type**\: str
    .. attribute:: location
    Contains a URL that represents the entry point for establishing notification delivery via server sent events
    **type**\: str
    **mandatory**\: True
    """
    _prefix = 'rcmon'
    _revision = '2016-08-15'

    def __init__(self):
        self.parent = None
        # 'encoding' is the YANG list key for this entry.
        self.encoding = None
        self.location = None

    @property
    def _common_path(self):
        # The path is built relative to the parent Stream entry and keyed
        # on 'encoding', so both must be set.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        if self.encoding is None:
            raise YPYModelError('Key property encoding is None')
        return self.parent._common_path +'/ietf-restconf-monitoring:access[ietf-restconf-monitoring:encoding = ' + str(self.encoding) + ']'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # True when the key or any leaf is populated.
        if self.encoding is not None:
            return True
        if self.location is not None:
            return True
        return False

    @staticmethod
    def _meta_info():
        # Deferred import avoids a circular dependency with the meta module.
        from ydk.models.ietf._meta import _ietf_restconf_monitoring as meta
        return meta._meta_table['RestconfState.Streams.Stream.Access']['meta_info']
@property
def _common_path(self):
    # Absolute keyed path for this stream entry; 'name' is the list key.
    if self.name is None:
        raise YPYModelError('Key property name is None')
    return '/ietf-restconf-monitoring:restconf-state/ietf-restconf-monitoring:streams/ietf-restconf-monitoring:stream[ietf-restconf-monitoring:name = ' + str(self.name) + ']'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return False

def _has_data(self):
    # True when the key, any child access entry, or any leaf is populated.
    if self.name is not None:
        return True
    if self.access is not None:
        for child_ref in self.access:
            if child_ref._has_data():
                return True
    if self.description is not None:
        return True
    if self.replay_log_creation_time is not None:
        return True
    if self.replay_support is not None:
        return True
    return False

@staticmethod
def _meta_info():
    # Deferred import avoids a circular dependency with the meta module.
    from ydk.models.ietf._meta import _ietf_restconf_monitoring as meta
    return meta._meta_table['RestconfState.Streams.Stream']['meta_info']
@property
def _common_path(self):
    # Top-level container path: absolute, no keys required.
    return '/ietf-restconf-monitoring:restconf-state/ietf-restconf-monitoring:streams'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return False

def _has_data(self):
    # True when any child stream entry has data.
    if self.stream is not None:
        for child_ref in self.stream:
            if child_ref._has_data():
                return True
    return False

@staticmethod
def _meta_info():
    # Deferred import avoids a circular dependency with the meta module.
    from ydk.models.ietf._meta import _ietf_restconf_monitoring as meta
    return meta._meta_table['RestconfState.Streams']['meta_info']
@property
def _common_path(self):
    # Root container of the ietf-restconf-monitoring model.
    return '/ietf-restconf-monitoring:restconf-state'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return False

def _has_data(self):
    # True when either child container has data.
    if self.capabilities is not None and self.capabilities._has_data():
        return True
    if self.streams is not None and self.streams._has_data():
        return True
    return False

@staticmethod
def _meta_info():
    # Deferred import avoids a circular dependency with the meta module.
    from ydk.models.ietf._meta import _ietf_restconf_monitoring as meta
    return meta._meta_table['RestconfState']['meta_info']
| |
from __future__ import absolute_import, division
from django.conf import settings
from django.core import urlresolvers
from django.db import connection
from django.db.models import Sum
from django.db.models.query import QuerySet
from django.http import HttpResponseNotFound, HttpRequest, HttpResponse
from django.template import RequestContext, loader
from django.utils import timezone
from django.utils.translation import ugettext as _
from jinja2 import Markup as mark_safe
from analytics.lib.counts import CountStat, process_count_stat, COUNT_STATS
from analytics.lib.time_utils import time_range
from analytics.models import BaseCount, InstallationCount, RealmCount, \
UserCount, StreamCount, last_successful_fill
from zerver.decorator import has_request_variables, REQ, zulip_internal, \
zulip_login_required, to_non_negative_int, to_utc_datetime
from zerver.lib.request import JsonableError
from zerver.lib.response import json_success
from zerver.lib.timestamp import ceiling_to_hour, ceiling_to_day, timestamp_to_datetime
from zerver.models import Realm, UserProfile, UserActivity, \
UserActivityInterval, Client
from zproject.jinja2 import render_to_response
from collections import defaultdict
from datetime import datetime, timedelta
import itertools
import json
import logging
import pytz
import re
import time
from six.moves import filter, map, range, zip
from typing import Any, Callable, Dict, List, Optional, Set, Text, \
Tuple, Type, Union
@zulip_login_required
def stats(request):
    # type: (HttpRequest) -> HttpResponse
    # Render the analytics stats page for the requesting user's realm.
    return render_to_response('analytics/stats.html',
                              context=dict(realm_name = request.user.realm.name))
@has_request_variables
def get_chart_data(request, user_profile, chart_name=REQ(),
                   min_length=REQ(converter=to_non_negative_int, default=None),
                   start=REQ(converter=to_utc_datetime, default=None),
                   end=REQ(converter=to_utc_datetime, default=None)):
    # type: (HttpRequest, UserProfile, Text, Optional[int], Optional[datetime], Optional[datetime]) -> HttpResponse
    """Return JSON time-series data for one of the /stats charts.

    chart_name selects the CountStat, the aggregation tables to read
    (realm and/or user level), the DB subgroup values, and their display
    labels.  start/end default to the realm's creation date and the last
    successful analytics fill; min_length pads the series via time_range.
    Raises JsonableError for an unknown chart, an inverted time range, or
    when no analytics data has been computed yet.
    """
    # Each branch configures: stat, tables, the subgroup values as stored
    # in the DB, their human-readable labels, an optional label-ordering
    # function, and a per-table include_empty_subgroups flag.
    if chart_name == 'number_of_humans':
        stat = COUNT_STATS['active_users:is_bot:day']
        tables = [RealmCount]
        subgroups = ['false', 'true']
        labels = ['human', 'bot']
        labels_sort_function = None
        include_empty_subgroups = [True]
    elif chart_name == 'messages_sent_over_time':
        stat = COUNT_STATS['messages_sent:is_bot:hour']
        tables = [RealmCount, UserCount]
        subgroups = ['false', 'true']
        labels = ['human', 'bot']
        labels_sort_function = None
        include_empty_subgroups = [True, False]
    elif chart_name == 'messages_sent_by_message_type':
        stat = COUNT_STATS['messages_sent:message_type:day']
        tables = [RealmCount, UserCount]
        subgroups = ['public_stream', 'private_stream', 'private_message']
        labels = ['Public Streams', 'Private Streams', 'PMs & Group PMs']
        labels_sort_function = lambda data: sort_by_totals(data['realm'])
        include_empty_subgroups = [True, True]
    elif chart_name == 'messages_sent_by_client':
        stat = COUNT_STATS['messages_sent:client:day']
        # Subgroups are Client ids (stringified); labels are the raw
        # Client names, in the same id order so they stay aligned.
        subgroups = [str(x) for x in Client.objects.values_list('id', flat=True).order_by('id')]
        tables = [RealmCount, UserCount]
        # these are further re-written by client_label_map
        labels = list(Client.objects.values_list('name', flat=True).order_by('id'))
        labels_sort_function = sort_client_labels
        include_empty_subgroups = [False, False]
    else:
        raise JsonableError(_("Unknown chart name: %s") % (chart_name,))
    # Most likely someone using our API endpoint. The /stats page does not
    # pass a start or end in its requests.
    if start is not None and end is not None and start > end:
        raise JsonableError(_("Start time is later than end time. Start: %(start)s, End: %(end)s") %
                            {'start': start, 'end': end})
    realm = user_profile.realm
    if start is None:
        start = realm.date_created
    if end is None:
        end = last_successful_fill(stat.property)
    if end is None or start > end:
        logging.warning("User from realm %s attempted to access /stats, but the computed "
                        "start time: %s (creation time of realm) is later than the computed "
                        "end time: %s (last successful analytics update). Is the "
                        "analytics cron job running?" % (realm.string_id, start, end))
        raise JsonableError(_("No analytics data available. Please contact your server administrator."))
    end_times = time_range(start, end, stat.frequency, min_length)
    data = {'end_times': end_times, 'frequency': stat.frequency, 'interval': stat.interval}
    for table, include_empty_subgroups_ in zip(tables, include_empty_subgroups):
        if table == RealmCount:
            data['realm'] = get_time_series_by_subgroup(
                stat, RealmCount, realm.id, end_times, subgroups, labels, include_empty_subgroups_)
        if table == UserCount:
            data['user'] = get_time_series_by_subgroup(
                stat, UserCount, user_profile.id, end_times, subgroups, labels, include_empty_subgroups_)
    if labels_sort_function is not None:
        data['display_order'] = labels_sort_function(data)
    else:
        data['display_order'] = None
    return json_success(data=data)
def sort_by_totals(value_arrays):
    # type: (Dict[str, List[int]]) -> List[str]
    """Return the labels ordered by the sum of their series, largest first."""
    summed = [(label, sum(series)) for label, series in value_arrays.items()]
    ordered = sorted(summed, key=lambda pair: pair[1], reverse=True)
    return [label for label, _ in ordered]
# For any given user, we want to show a fixed set of clients in the chart,
# regardless of the time aggregation or whether we're looking at realm or
# user data. This fixed set ideally includes the clients most important in
# understanding the realm's traffic and the user's traffic. This function
# tries to rank the clients so that taking the first N elements of the
# sorted list has a reasonable chance of doing so.
def sort_client_labels(data):
    # type: (Dict[str, Dict[str, List[int]]]) -> List[str]
    """Merge realm-level and user-level client rankings into one order.

    The realm ranking is the baseline; a label's user-level rank (minus
    .1, so it wins ties against the same realm position) can only improve
    its placement.
    """
    sort_keys = {}  # type: Dict[str, float]
    for rank, label in enumerate(sort_by_totals(data['realm'])):
        sort_keys[label] = rank
    for rank, label in enumerate(sort_by_totals(data['user'])):
        sort_keys[label] = min(rank - .1, sort_keys.get(label, rank))
    ordered = sorted(sort_keys.items(), key=lambda pair: pair[1])
    return [label for label, _ in ordered]
def table_filtered_to_id(table, key_id):
    # type: (Type[BaseCount], int) -> QuerySet
    """Return table's queryset narrowed to the row owner identified by key_id.

    InstallationCount has no owner column, so it is returned unfiltered.
    """
    if table == RealmCount:
        return RealmCount.objects.filter(realm_id=key_id)
    if table == UserCount:
        return UserCount.objects.filter(user_id=key_id)
    if table == StreamCount:
        return StreamCount.objects.filter(stream_id=key_id)
    if table == InstallationCount:
        return InstallationCount.objects.all()
    raise AssertionError("Unknown table: %s" % (table,))
def client_label_map(name):
    # type: (str) -> str
    """Map a raw Client.name to the label displayed in the /stats charts."""
    exact = {
        "website": "Website",
        "ZulipAndroid": "Android app",
        "ZulipiOS": "Old iOS app",
        "ZulipMobile": "New iOS app",
        "ZulipPython": "Python API",
        "API: Python": "Python API",
    }
    if name in exact:
        return exact[name]
    if name.startswith("desktop app"):
        return "Old desktop app"
    if name.startswith("Zulip") and name.endswith("Webhook"):
        # "ZulipFooWebhook" -> "Foo webhook"
        return name[len("Zulip"):-len("Webhook")] + " webhook"
    # Clients in dev environment autogenerated data start with _ so
    # that it's easy to manually drop without affecting other data.
    if settings.DEVELOPMENT and name.startswith("_"):
        return name[1:]
    return name
def rewrite_client_arrays(value_arrays):
    # type: (Dict[str, List[int]]) -> Dict[str, List[int]]
    """Collapse client series whose names map to the same display label.

    Series sharing a mapped label are summed elementwise; the rest are
    copied through unchanged.
    """
    merged = {}  # type: Dict[str, List[int]]
    for raw_label, series in value_arrays.items():
        label = client_label_map(raw_label)
        if label not in merged:
            merged[label] = list(series)
        else:
            bucket = merged[label]
            for idx in range(len(series)):
                bucket[idx] += series[idx]
    return merged
def get_time_series_by_subgroup(stat, table, key_id, end_times, subgroups, labels, include_empty_subgroups):
    # type: (CountStat, Type[BaseCount], Optional[int], List[datetime], List[str], List[str], bool) -> Dict[str, List[int]]
    """Read one stat from one table and return {label: [value per end_time]}.

    A subgroup with no rows in the DB is emitted as an all-zero series only
    when include_empty_subgroups is True; otherwise it is omitted.
    Raises AssertionError when subgroups and labels differ in length.
    """
    if len(subgroups) != len(labels):
        raise AssertionError("subgroups and labels have lengths %s and %s, which are different." %
                             (len(subgroups), len(labels)))
    queryset = table_filtered_to_id(table, key_id).filter(property=stat.property) \
        .values_list('subgroup', 'end_time', 'value')
    # subgroup -> end_time -> value; missing cells read as 0.
    value_dicts = defaultdict(lambda: defaultdict(int))  # type: Dict[Optional[str], Dict[datetime, int]]
    for subgroup, end_time, value in queryset:
        value_dicts[subgroup][end_time] = value
    value_arrays = {}
    for subgroup, label in zip(subgroups, labels):
        if (subgroup in value_dicts) or include_empty_subgroups:
            value_arrays[label] = [value_dicts[subgroup][end_time] for end_time in end_times]
    if stat == COUNT_STATS['messages_sent:client:day']:
        # HACK: We rewrite these arrays to collapse the Client objects
        # with similar names into a single sum, and generally give
        # them better names
        return rewrite_client_arrays(value_arrays)
    return value_arrays
# Timezone used to render timestamps in the activity reports below.
eastern_tz = pytz.timezone('US/Eastern')
def make_table(title, cols, rows, has_row_class=False):
    # type: (str, List[str], List[Any], bool) -> str
    """Render rows as an HTML table via the ad_hoc_query template.

    When has_row_class is False, each row is wrapped into the
    dict(cells=..., row_class=None) shape the template expects.
    """
    if not has_row_class:
        rows = [dict(cells=row, row_class=None) for row in rows]
    data = dict(title=title, cols=cols, rows=rows)
    return loader.render_to_string(
        'analytics/ad_hoc_query.html',
        dict(data=data)
    )
def dictfetchall(cursor):
    # type: (connection.cursor) -> List[Dict[str, Any]]
    """Return every remaining row of the cursor as a column-name -> value dict."""
    columns = [col[0] for col in cursor.description]
    return [dict(zip(columns, row)) for row in cursor.fetchall()]
def get_realm_day_counts():
    # type: () -> Dict[str, Dict[str, str]]
    """Return, per realm, an HTML fragment of human message counts for the
    last 8 days, with the best/worst day highlighted.

    Result shape: {string_id: {'cnts': '<td ...>..</td>...'}}.
    """
    # Count human-sent messages per realm per day-of-age (0 = today),
    # excluding the zephyr mirror and monitoring clients.
    query = '''
        select
            r.string_id,
            (now()::date - pub_date::date) age,
            count(*) cnt
        from zerver_message m
        join zerver_userprofile up on up.id = m.sender_id
        join zerver_realm r on r.id = up.realm_id
        join zerver_client c on c.id = m.sending_client_id
        where
            (not up.is_bot)
        and
            pub_date > now()::date - interval '8 day'
        and
            c.name not in ('zephyr_mirror', 'ZulipMonitoring')
        group by
            r.string_id,
            age
        order by
            r.string_id,
            age
    '''
    cursor = connection.cursor()
    cursor.execute(query)
    rows = dictfetchall(cursor)
    cursor.close()
    counts = defaultdict(dict)  # type: Dict[str, Dict[int, int]]
    for row in rows:
        counts[row['string_id']][row['age']] = row['cnt']
    result = {}
    for string_id in counts:
        # Days with no rows count as 0 messages.
        raw_cnts = [counts[string_id].get(age, 0) for age in range(8)]
        min_cnt = min(raw_cnts)
        max_cnt = max(raw_cnts)
        def format_count(cnt):
            # type: (int) -> str
            # Closure over this iteration's min_cnt/max_cnt; safe because
            # ''.join(map(...)) below consumes it before the next iteration.
            if cnt == min_cnt:
                good_bad = 'bad'
            elif cnt == max_cnt:
                good_bad = 'good'
            else:
                good_bad = 'neutral'
            return '<td class="number %s">%s</td>' % (good_bad, cnt)
        cnts = ''.join(map(format_count, raw_cnts))
        result[string_id] = dict(cnts=cnts)
    return result
def realm_summary_table(realm_minutes):
    # type: (Dict[str, float]) -> str
    """Render the per-realm summary table for the /activity dashboard.

    realm_minutes maps string_id -> minutes of user activity in the last
    day (from user_activity_intervals); it is folded into the rows as
    'hours' / 'hours_per_user'.  Only realms with activity in the last
    two weeks are listed, ordered by active user count.
    """
    # One row per recently-active realm with: active users (last day),
    # at-risk users (last seen 1-7 days ago), and total human/bot counts.
    query = '''
        SELECT
            realm.string_id,
            coalesce(user_counts.active_user_count, 0) active_user_count,
            coalesce(at_risk_counts.at_risk_count, 0) at_risk_count,
            (
                SELECT
                    count(*)
                FROM zerver_userprofile up
                WHERE up.realm_id = realm.id
                AND is_active
                AND not is_bot
            ) user_profile_count,
            (
                SELECT
                    count(*)
                FROM zerver_userprofile up
                WHERE up.realm_id = realm.id
                AND is_active
                AND is_bot
            ) bot_count
        FROM zerver_realm realm
        LEFT OUTER JOIN
            (
                SELECT
                    up.realm_id realm_id,
                    count(distinct(ua.user_profile_id)) active_user_count
                FROM zerver_useractivity ua
                JOIN zerver_userprofile up
                    ON up.id = ua.user_profile_id
                WHERE
                    query in (
                        '/json/send_message',
                        'send_message_backend',
                        '/api/v1/send_message',
                        '/json/update_pointer',
                        '/json/users/me/pointer'
                    )
                AND
                    last_visit > now() - interval '1 day'
                AND
                    not is_bot
                GROUP BY realm_id
            ) user_counts
            ON user_counts.realm_id = realm.id
        LEFT OUTER JOIN
            (
                SELECT
                    realm_id,
                    count(*) at_risk_count
                FROM (
                    SELECT
                        realm.id as realm_id,
                        up.email
                    FROM zerver_useractivity ua
                    JOIN zerver_userprofile up
                        ON up.id = ua.user_profile_id
                    JOIN zerver_realm realm
                        ON realm.id = up.realm_id
                    WHERE up.is_active
                    AND (not up.is_bot)
                    AND
                        ua.query in (
                            '/json/send_message',
                            'send_message_backend',
                            '/api/v1/send_message',
                            '/json/update_pointer',
                            '/json/users/me/pointer'
                        )
                    GROUP by realm.id, up.email
                    HAVING max(last_visit) between
                        now() - interval '7 day' and
                        now() - interval '1 day'
                ) as at_risk_users
                GROUP BY realm_id
            ) at_risk_counts
            ON at_risk_counts.realm_id = realm.id
        WHERE EXISTS (
                SELECT *
                FROM zerver_useractivity ua
                JOIN zerver_userprofile up
                    ON up.id = ua.user_profile_id
                WHERE
                    query in (
                        '/json/send_message',
                        '/api/v1/send_message',
                        'send_message_backend',
                        '/json/update_pointer',
                        '/json/users/me/pointer'
                    )
                AND
                    up.realm_id = realm.id
                AND
                    last_visit > now() - interval '2 week'
        )
        ORDER BY active_user_count DESC, string_id ASC
    '''
    cursor = connection.cursor()
    cursor.execute(query)
    rows = dictfetchall(cursor)
    cursor.close()
    # get messages sent per day
    counts = get_realm_day_counts()
    for row in rows:
        try:
            row['history'] = counts[row['string_id']]['cnts']
        except Exception:
            # Realms with no messages in the window have no entry.
            row['history'] = ''
    # augment data with realm_minutes
    total_hours = 0.0
    for row in rows:
        string_id = row['string_id']
        minutes = realm_minutes.get(string_id, 0.0)
        hours = minutes / 60.0
        total_hours += hours
        row['hours'] = str(int(hours))
        try:
            row['hours_per_user'] = '%.1f' % (hours / row['active_user_count'],)
        except Exception:
            # Division by zero when there are no active users; leave unset.
            pass
    # formatting
    for row in rows:
        row['string_id'] = realm_activity_link(row['string_id'])
    # Count active sites
    def meets_goal(row):
        # type: (Dict[str, int]) -> bool
        return row['active_user_count'] >= 5
    num_active_sites = len(list(filter(meets_goal, rows)))
    # create totals
    total_active_user_count = 0
    total_user_profile_count = 0
    total_bot_count = 0
    total_at_risk_count = 0
    for row in rows:
        total_active_user_count += int(row['active_user_count'])
        total_user_profile_count += int(row['user_profile_count'])
        total_bot_count += int(row['bot_count'])
        total_at_risk_count += int(row['at_risk_count'])
    rows.append(dict(
        string_id='Total',
        active_user_count=total_active_user_count,
        user_profile_count=total_user_profile_count,
        bot_count=total_bot_count,
        hours=int(total_hours),
        at_risk_count=total_at_risk_count,
    ))
    content = loader.render_to_string(
        'analytics/realm_summary_table.html',
        dict(rows=rows, num_active_sites=num_active_sites)
    )
    return content
def user_activity_intervals():
    # type: () -> Tuple[mark_safe, Dict[str, float]]
    """Summarize per-user online time over the last 24 hours.

    Returns a (preformatted HTML report, {string_id: minutes}) pair; the
    minutes dict feeds realm_summary_table.
    """
    day_end = timestamp_to_datetime(time.time())
    day_start = day_end - timedelta(hours=24)
    output = "Per-user online duration for the last 24 hours:\n"
    total_duration = timedelta(0)
    # Any interval overlapping the window counts; each is clamped to the
    # window below before summing.
    all_intervals = UserActivityInterval.objects.filter(
        end__gte=day_start,
        start__lte=day_end
    ).select_related(
        'user_profile',
        'user_profile__realm'
    ).only(
        'start',
        'end',
        'user_profile__email',
        'user_profile__realm__string_id'
    ).order_by(
        'user_profile__realm__string_id',
        'user_profile__email'
    )
    by_string_id = lambda row: row.user_profile.realm.string_id
    by_email = lambda row: row.user_profile.email
    realm_minutes = {}
    # itertools.groupby requires its input sorted by the grouping key;
    # the order_by above provides exactly that (realm, then email).
    for string_id, realm_intervals in itertools.groupby(all_intervals, by_string_id):
        realm_duration = timedelta(0)
        output += '<hr>%s\n' % (string_id,)
        for email, intervals in itertools.groupby(realm_intervals, by_email):
            duration = timedelta(0)
            for interval in intervals:
                # Clamp each interval to the 24-hour window.
                start = max(day_start, interval.start)
                end = min(day_end, interval.end)
                duration += end - start
            total_duration += duration
            realm_duration += duration
            output += "  %-*s%s\n" % (37, email, duration)
        realm_minutes[string_id] = realm_duration.total_seconds() / 60
    output += "\nTotal Duration:                      %s\n" % (total_duration,)
    output += "\nTotal Duration in minutes:           %s\n" % (total_duration.total_seconds() / 60.,)
    output += "Total Duration amortized to a month: %s" % (total_duration.total_seconds() * 30. / 60.,)
    content = mark_safe('<pre>' + output + '</pre>')
    return content, realm_minutes
def sent_messages_report(realm):
    # type: (str) -> str
    """Render a table of daily human/bot message counts for one realm
    over the last two weeks.
    """
    title = 'Recently sent messages for ' + realm
    cols = [
        'Date',
        'Humans',
        'Bots'
    ]
    # generate_series provides one row per day so days with no messages
    # still appear (with null counts).  The two %s placeholders are both
    # bound to `realm` in cursor.execute below.
    query = '''
        select
            series.day::date,
            humans.cnt,
            bots.cnt
        from (
            select generate_series(
                (now()::date - interval '2 week'),
                now()::date,
                interval '1 day'
            ) as day
        ) as series
        left join (
            select
                pub_date::date pub_date,
                count(*) cnt
            from zerver_message m
            join zerver_userprofile up on up.id = m.sender_id
            join zerver_realm r on r.id = up.realm_id
            where
                r.string_id = %s
            and
                (not up.is_bot)
            and
                pub_date > now() - interval '2 week'
            group by
                pub_date::date
            order by
                pub_date::date
        ) humans on
            series.day = humans.pub_date
        left join (
            select
                pub_date::date pub_date,
                count(*) cnt
            from zerver_message m
            join zerver_userprofile up on up.id = m.sender_id
            join zerver_realm r on r.id = up.realm_id
            where
                r.string_id = %s
            and
                up.is_bot
            and
                pub_date > now() - interval '2 week'
            group by
                pub_date::date
            order by
                pub_date::date
        ) bots on
            series.day = bots.pub_date
    '''
    cursor = connection.cursor()
    cursor.execute(query, [realm, realm])
    rows = cursor.fetchall()
    cursor.close()
    return make_table(title, cols, rows)
def ad_hoc_queries():
    # type: () -> List[Dict[str, str]]
    """Run the fixed set of raw-SQL activity reports and return them as
    [{'title': ..., 'content': <rendered HTML table>}, ...].
    """
    def get_page(query, cols, title):
        # type: (str, List[str], str) -> Dict[str, str]
        # Execute one query and render it, linkifying realm columns and
        # reformatting timestamp columns.
        cursor = connection.cursor()
        cursor.execute(query)
        rows = cursor.fetchall()
        rows = list(map(list, rows))
        cursor.close()
        def fix_rows(i, fixup_func):
            # type: (int, Union[Callable[[Realm], mark_safe], Callable[[datetime], str]]) -> None
            for row in rows:
                row[i] = fixup_func(row[i])
        for i, col in enumerate(cols):
            if col == 'Realm':
                fix_rows(i, realm_activity_link)
            elif col in ['Last time', 'Last visit']:
                fix_rows(i, format_date_for_activity_reports)
        content = make_table(title, cols, rows)
        return dict(
            content=content,
            title=title
        )
    pages = []
    ###
    for mobile_type in ['Android', 'ZulipiOS']:
        title = '%s usage' % (mobile_type,)
        # NOTE(review): mobile_type is %-interpolated into a LIKE pattern.
        # Values come from the fixed list above and contain no wildcards,
        # so this acts as an exact match and is not injectable here.
        query = '''
            select
                realm.string_id,
                up.id user_id,
                client.name,
                sum(count) as hits,
                max(last_visit) as last_time
            from zerver_useractivity ua
            join zerver_client client on client.id = ua.client_id
            join zerver_userprofile up on up.id = ua.user_profile_id
            join zerver_realm realm on realm.id = up.realm_id
            where
                client.name like '%s'
            group by string_id, up.id, client.name
            having max(last_visit) > now() - interval '2 week'
            order by string_id, up.id, client.name
        ''' % (mobile_type,)
        cols = [
            'Realm',
            'User id',
            'Name',
            'Hits',
            'Last time'
        ]
        pages.append(get_page(query, cols, title))
    ###
    title = 'Desktop users'
    query = '''
        select
            realm.string_id,
            client.name,
            sum(count) as hits,
            max(last_visit) as last_time
        from zerver_useractivity ua
        join zerver_client client on client.id = ua.client_id
        join zerver_userprofile up on up.id = ua.user_profile_id
        join zerver_realm realm on realm.id = up.realm_id
        where
            client.name like 'desktop%%'
        group by string_id, client.name
        having max(last_visit) > now() - interval '2 week'
        order by string_id, client.name
    '''
    cols = [
        'Realm',
        'Client',
        'Hits',
        'Last time'
    ]
    pages.append(get_page(query, cols, title))
    ###
    title = 'Integrations by realm'
    # Webhook requests are identified by 'external' in the query path;
    # the integration name is the 5th path segment.
    query = '''
        select
            realm.string_id,
            case
                when query like '%%external%%' then split_part(query, '/', 5)
                else client.name
            end client_name,
            sum(count) as hits,
            max(last_visit) as last_time
        from zerver_useractivity ua
        join zerver_client client on client.id = ua.client_id
        join zerver_userprofile up on up.id = ua.user_profile_id
        join zerver_realm realm on realm.id = up.realm_id
        where
            (query in ('send_message_backend', '/api/v1/send_message')
            and client.name not in ('Android', 'ZulipiOS')
            and client.name not like 'test: Zulip%%'
            )
        or
            query like '%%external%%'
        group by string_id, client_name
        having max(last_visit) > now() - interval '2 week'
        order by string_id, client_name
    '''
    cols = [
        'Realm',
        'Client',
        'Hits',
        'Last time'
    ]
    pages.append(get_page(query, cols, title))
    ###
    title = 'Integrations by client'
    query = '''
        select
            case
                when query like '%%external%%' then split_part(query, '/', 5)
                else client.name
            end client_name,
            realm.string_id,
            sum(count) as hits,
            max(last_visit) as last_time
        from zerver_useractivity ua
        join zerver_client client on client.id = ua.client_id
        join zerver_userprofile up on up.id = ua.user_profile_id
        join zerver_realm realm on realm.id = up.realm_id
        where
            (query in ('send_message_backend', '/api/v1/send_message')
            and client.name not in ('Android', 'ZulipiOS')
            and client.name not like 'test: Zulip%%'
            )
        or
            query like '%%external%%'
        group by client_name, string_id
        having max(last_visit) > now() - interval '2 week'
        order by client_name, string_id
    '''
    cols = [
        'Client',
        'Realm',
        'Hits',
        'Last time'
    ]
    pages.append(get_page(query, cols, title))
    return pages
@zulip_internal
@has_request_variables
def get_activity(request):
    # type: (HttpRequest) -> HttpResponse
    """Render the internal /activity dashboard: realm counts, per-user
    durations, and the ad hoc query pages."""
    duration_content, realm_minutes = user_activity_intervals()  # type: Tuple[mark_safe, Dict[str, float]]
    counts_content = realm_summary_table(realm_minutes)  # type: str
    data = [
        ('Counts', counts_content),
        ('Durations', duration_content),
    ]
    data.extend((page['title'], page['content']) for page in ad_hoc_queries())
    return render_to_response(
        'analytics/activity.html',
        dict(data=data, title='Activity', is_home=True),
        request=request
    )
def get_user_activity_records_for_realm(realm, is_bot):
    # type: (str, bool) -> QuerySet
    """Fetch activity records for all active users (or bots) of a realm,
    ordered by email then most-recent visit."""
    fields = [
        'user_profile__full_name',
        'user_profile__email',
        'query',
        'client__name',
        'count',
        'last_visit',
    ]
    return UserActivity.objects.filter(
        user_profile__realm__string_id=realm,
        user_profile__is_active=True,
        user_profile__is_bot=is_bot
    ).order_by(
        "user_profile__email", "-last_visit"
    ).select_related('user_profile', 'client').only(*fields)
def get_user_activity_records_for_email(email):
    # type: (str) -> List[QuerySet]
    """Fetch one user's activity records, most recent visit first."""
    fields = [
        'user_profile__full_name',
        'query',
        'client__name',
        'count',
        'last_visit'
    ]
    return UserActivity.objects.filter(
        user_profile__email=email
    ).order_by("-last_visit").select_related('user_profile', 'client').only(*fields)
def raw_user_activity_table(records):
    # type: (List[QuerySet]) -> str
    """Render the unaggregated activity records as an HTML table."""
    cols = [
        'query',
        'client',
        'count',
        'last_visit'
    ]
    rows = [
        [
            record.query,
            record.client.name,
            record.count,
            format_date_for_activity_reports(record.last_visit),
        ]
        for record in records
    ]
    return make_table('Raw Data', cols, rows)
def get_user_activity_summary(records):
    # type: (List[QuerySet]) -> Dict[str, Dict[str, Any]]
    """Aggregate raw activity records into {action: {count, last_visit}}.

    Actions include synthetic buckets ('use', 'send', 'pointer',
    'desktop', 'website') as well as one bucket per client name; the
    special key 'name' holds the user's full name.  The value type is
    really Union[int, datetime], but `Any` keeps the inner closure's
    annotations manageable.
    """
    summary = {}  # type: Dict[str, Dict[str, Any]]

    def tally(action, record):
        # type: (str, QuerySet) -> None
        entry = summary.get(action)
        if entry is None:
            summary[action] = dict(
                count=record.count,
                last_visit=record.last_visit
            )
        else:
            entry['count'] += record.count
            entry['last_visit'] = max(entry['last_visit'], record.last_visit)

    if records:
        summary['name'] = records[0].user_profile.full_name
    for record in records:
        client = record.client.name
        query = record.query
        tally('use', record)
        if client == 'API':
            m = re.match('/api/.*/external/(.*)', query)
            if m:
                client = m.group(1)
                tally(client, record)
        if client.startswith('desktop'):
            tally('desktop', record)
        if client == 'website':
            tally('website', record)
        if ('send_message' in query) or re.search('/api/.*/external/.*', query):
            tally('send', record)
        if query in ['/json/update_pointer', '/json/users/me/pointer', '/api/v1/update_pointer']:
            tally('pointer', record)
        # NOTE(review): website and rewritten API clients were already
        # tallied under their label above, so those records count twice
        # in their client bucket — preserved from the original.
        tally(client, record)
    return summary
def format_date_for_activity_reports(date):
    # type: (Optional[datetime]) -> str
    """Format a datetime in US/Eastern for the reports; '' when absent."""
    if not date:
        return ''
    return date.astimezone(eastern_tz).strftime('%Y-%m-%d %H:%M')
def user_activity_link(email):
    # type: (str) -> mark_safe
    """Return an HTML link to the per-user activity page."""
    url = urlresolvers.reverse('analytics.views.get_user_activity',
                               kwargs=dict(email=email))
    return mark_safe('<a href="%s">%s</a>' % (url, email))
def realm_activity_link(realm_str):
    # type: (str) -> mark_safe
    """Return an HTML link to the per-realm activity page."""
    url = urlresolvers.reverse('analytics.views.get_realm_activity',
                               kwargs=dict(realm_str=realm_str))
    return mark_safe('<a href="%s">%s</a>' % (url, realm_str))
def realm_client_table(user_summaries):
    # type: (Dict[str, Dict[str, Dict[str, Any]]]) -> str
    """Build the per-client activity table across all users of a realm."""
    # Summary keys that are aggregates rather than real client names.
    exclude_keys = [
        'internal',
        'name',
        'use',
        'send',
        'pointer',
        'website',
        'desktop',
    ]
    rows = []
    for email, user_summary in user_summaries.items():
        email_link = user_activity_link(email)
        name = user_summary['name']
        for client, stats_ in user_summary.items():
            if client in exclude_keys:
                continue
            rows.append([
                format_date_for_activity_reports(stats_['last_visit']),
                client,
                name,
                email_link,
                stats_['count'],
            ])
    # Most recently seen clients first (dates format lexicographically).
    rows.sort(key=lambda r: r[0], reverse=True)
    cols = [
        'Last visit',
        'Client',
        'Name',
        'Email',
        'Count',
    ]
    return make_table('Clients', cols, rows)
def user_activity_summary_table(user_summary):
    # type: (Dict[str, Dict[str, Any]]) -> str
    """Render one user's per-action summary as an HTML table."""
    rows = [
        [format_date_for_activity_reports(v['last_visit']), k, v['count']]
        for k, v in user_summary.items()
        if k != 'name'
    ]
    # Most recent activity first (dates format lexicographically).
    rows.sort(key=lambda r: r[0], reverse=True)
    cols = [
        'last_visit',
        'client',
        'count',
    ]
    return make_table('User Activity', cols, rows)
def realm_user_summary_table(all_records, admin_emails):
    # type: (List[QuerySet], Set[Text]) -> Tuple[Dict[str, Dict[str, Any]], str]
    """Build the per-user summary table for a realm.

    all_records must already be ordered by email (see
    get_user_activity_records_for_realm) for groupby to work.  Returns
    ({email: summary}, rendered HTML table).
    """
    user_records = {}
    def by_email(record):
        # type: (QuerySet) -> str
        return record.user_profile.email
    for email, records in itertools.groupby(all_records, by_email):
        user_records[email] = get_user_activity_summary(list(records))
    def get_last_visit(user_summary, k):
        # type: (Dict[str, Dict[str, datetime]], str) -> Optional[datetime]
        if k in user_summary:
            return user_summary[k]['last_visit']
        else:
            return None
    def get_count(user_summary, k):
        # type: (Dict[str, Dict[str, str]], str) -> str
        if k in user_summary:
            return user_summary[k]['count']
        else:
            return ''
    def is_recent(val):
        # type: (Optional[datetime]) -> bool
        # "Recently active" = heard from within the last 5 minutes.
        age = timezone.now() - val
        return age.total_seconds() < 5 * 60
    rows = []
    for email, user_summary in user_records.items():
        email_link = user_activity_link(email)
        sent_count = get_count(user_summary, 'send')
        cells = [user_summary['name'], email_link, sent_count]
        row_class = ''
        # One formatted-date column per tracked action/client field.
        for field in ['use', 'send', 'pointer', 'desktop', 'ZulipiOS', 'Android']:
            visit = get_last_visit(user_summary, field)
            if field == 'use':
                if visit and is_recent(visit):
                    row_class += ' recently_active'
                if email in admin_emails:
                    row_class += ' admin'
            val = format_date_for_activity_reports(visit)
            cells.append(val)
        row = dict(cells=cells, row_class=row_class)
        rows.append(row)
    def by_used_time(row):
        # type: (Dict[str, Any]) -> str
        # cells[3] is the formatted 'use' date; its '%Y-%m-%d %H:%M'
        # format sorts correctly as a string.
        return row['cells'][3]
    rows = sorted(rows, key=by_used_time, reverse=True)
    cols = [
        'Name',
        'Email',
        'Total sent',
        'Heard from',
        'Message sent',
        'Pointer motion',
        'Desktop',
        'ZulipiOS',
        'Android',
    ]
    title = 'Summary'
    content = make_table(title, cols, rows, has_row_class=True)
    return user_records, content
@zulip_internal
def get_realm_activity(request, realm_str):
    # type: (HttpRequest, str) -> HttpResponse
    """Render the internal activity dashboard for a single realm:
    human/bot summaries, a per-client table, and message history."""
    try:
        admins = Realm.objects.get(string_id=realm_str).get_admin_users()
    except Realm.DoesNotExist:
        return HttpResponseNotFound("Realm %s does not exist" % (realm_str,))
    admin_emails = {admin.email for admin in admins}

    data = []  # type: List[Tuple[str, str]]
    all_user_records = {}  # type: Dict[str, Any]
    for is_bot, page_title in [(False, 'Humans'), (True, 'Bots')]:
        all_records = list(get_user_activity_records_for_realm(realm_str, is_bot))
        user_records, content = realm_user_summary_table(all_records, admin_emails)
        all_user_records.update(user_records)
        data.append((page_title, content))
    data.append(('Clients', realm_client_table(all_user_records)))
    data.append(('History', sent_messages_report(realm_str)))

    realm_link = 'https://stats1.zulip.net:444/render/?from=-7days'
    realm_link += '&target=stats.gauges.staging.users.active.%s.0_16hr' % (realm_str,)
    return render_to_response(
        'analytics/activity.html',
        dict(data=data, realm_link=realm_link, title=realm_str),
        request=request
    )
@zulip_internal
def get_user_activity(request, email):
    # type: (HttpRequest, str) -> HttpResponse
    """Render the internal activity page for a single user."""
    records = get_user_activity_records_for_email(email)
    user_summary = get_user_activity_summary(records)
    data = [
        ('Summary', user_activity_summary_table(user_summary)),
        ('Info', raw_user_activity_table(records)),
    ]  # type: List[Tuple[str, str]]
    return render_to_response(
        'analytics/activity.html',
        dict(data=data, title=email),
        request=request
    )
| |
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import struct
from ryu.base import app_manager
from ryu.controller import event
from ryu.controller import ofp_event
from ryu.controller.handler import DEAD_DISPATCHER
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_3
from ryu.lib import addrconv
from ryu.lib import hub
from ryu.lib.dpid import dpid_to_str
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ipv4
from ryu.lib.packet import igmp
class EventPacketIn(event.EventBase):
    """Event carrying a PacketIn message for non-IGMP packets.

    IgmpLib handles IGMP packets itself and re-raises every other
    PacketIn to its observers wrapped in this event.
    """

    def __init__(self, msg):
        """Store the original OpenFlow PacketIn message."""
        super(EventPacketIn, self).__init__()
        self.msg = msg
# Reason codes carried in EventMulticastGroupStateChanged.reason.
MG_GROUP_ADDED = 1
MG_MEMBER_CHANGED = 2
MG_GROUP_REMOVED = 3
class EventMulticastGroupStateChanged(event.EventBase):
    """Event notifying a change in the status of a multicast group.

    Attributes:
        reason:  why the event occurred; one of the MG_* constants.
        address: the multicast group address.
        src:     the port number on which a querier exists.
        dsts:    list of port numbers on which the members exist.
    """

    def __init__(self, reason, address, src, dsts):
        """Record the state change described by the arguments above."""
        super(EventMulticastGroupStateChanged, self).__init__()
        self.reason = reason
        self.address = address
        self.src = src
        self.dsts = dsts
class IgmpLib(app_manager.RyuApp):
    """IGMP snooping library.

    Dispatches IGMP traffic to either an IgmpQuerier (on the configured
    querier datapath) or an IgmpSnooper (everywhere else); non-IGMP
    PacketIns are forwarded to observers as EventPacketIn.
    """
    # -------------------------------------------------------------------
    # PUBLIC METHODS
    # -------------------------------------------------------------------
    def __init__(self):
        """initialization."""
        super(IgmpLib, self).__init__()
        self.name = 'igmplib'
        # IgmpQuerier / IgmpSnooper are defined elsewhere in this module
        # (not shown in this excerpt).
        self._querier = IgmpQuerier()
        self._snooper = IgmpSnooper(self.send_event_to_observers)

    def set_querier_mode(self, dpid, server_port):
        """set a datapath id and server port number to the instance
        of IgmpQuerier.

        ============ ==================================================
        Attribute    Description
        ============ ==================================================
        dpid         the datapath id that will operate as a querier.
        server_port  the port number linked to the multicasting server.
        ============ ==================================================
        """
        self._querier.set_querier_mode(dpid, server_port)

    # -------------------------------------------------------------------
    # PUBLIC METHODS ( EVENT HANDLERS )
    # -------------------------------------------------------------------
    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def packet_in_handler(self, evt):
        """PacketIn event handler. when the received packet was IGMP,
        proceed it. otherwise, send a event."""
        msg = evt.msg
        dpid = msg.datapath.id
        req_pkt = packet.Packet(msg.data)
        req_igmp = req_pkt.get_protocol(igmp.igmp)
        if req_igmp:
            # IGMP on the querier datapath goes to the querier; on any
            # other datapath it goes to the snooper.
            if self._querier.dpid == dpid:
                self._querier.packet_in_handler(req_igmp, msg)
            else:
                self._snooper.packet_in_handler(req_pkt, req_igmp, msg)
        else:
            self.send_event_to_observers(EventPacketIn(msg))

    @set_ev_cls(ofp_event.EventOFPStateChange,
                [MAIN_DISPATCHER, DEAD_DISPATCHER])
    def state_change_handler(self, evt):
        """StateChange event handler."""
        datapath = evt.datapath
        assert datapath is not None
        # Start/stop the querier's periodic loop as its datapath
        # connects and disconnects.
        if datapath.id == self._querier.dpid:
            if evt.state == MAIN_DISPATCHER:
                self._querier.start_loop(datapath)
            elif evt.state == DEAD_DISPATCHER:
                self._querier.stop_loop()
class IgmpBase(object):
"""IGMP abstract class library."""
# -------------------------------------------------------------------
# PUBLIC METHODS
# -------------------------------------------------------------------
    def __init__(self):
        # Dispatch tables keyed by OpenFlow protocol version; OF1.3
        # reuses the OF1.2 flow-mod implementations.
        self._set_flow_func = {
            ofproto_v1_0.OFP_VERSION: self._set_flow_entry_v1_0,
            ofproto_v1_2.OFP_VERSION: self._set_flow_entry_v1_2,
            ofproto_v1_3.OFP_VERSION: self._set_flow_entry_v1_2,
        }
        self._del_flow_func = {
            ofproto_v1_0.OFP_VERSION: self._del_flow_entry_v1_0,
            ofproto_v1_2.OFP_VERSION: self._del_flow_entry_v1_2,
            ofproto_v1_3.OFP_VERSION: self._del_flow_entry_v1_2,
        }
# -------------------------------------------------------------------
# PROTECTED METHODS ( RELATED TO OPEN FLOW PROTOCOL )
# -------------------------------------------------------------------
def _set_flow_entry_v1_0(self, datapath, actions, in_port, dst,
src=None):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch(
dl_type=ether.ETH_TYPE_IP, in_port=in_port,
nw_src=self._ipv4_text_to_int(src),
nw_dst=self._ipv4_text_to_int(dst))
mod = parser.OFPFlowMod(
datapath=datapath, match=match, cookie=0,
command=ofproto.OFPFC_ADD, actions=actions)
datapath.send_msg(mod)
def _set_flow_entry_v1_2(self, datapath, actions, in_port, dst,
src=None):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch(
eth_type=ether.ETH_TYPE_IP, in_port=in_port, ipv4_dst=dst)
if src is not None:
match.append_field(ofproto.OXM_OF_IPV4_SRC, src)
inst = [parser.OFPInstructionActions(
ofproto.OFPIT_APPLY_ACTIONS, actions)]
mod = parser.OFPFlowMod(
datapath=datapath, command=ofproto.OFPFC_ADD,
priority=65535, match=match, instructions=inst)
datapath.send_msg(mod)
def _set_flow_entry(self, datapath, actions, in_port, dst, src=None):
"""set a flow entry."""
set_flow = self._set_flow_func.get(datapath.ofproto.OFP_VERSION)
assert set_flow
set_flow(datapath, actions, in_port, dst, src)
def _del_flow_entry_v1_0(self, datapath, in_port, dst, src=None):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch(
dl_type=ether.ETH_TYPE_IP, in_port=in_port,
nw_src=self._ipv4_text_to_int(src),
nw_dst=self._ipv4_text_to_int(dst))
mod = parser.OFPFlowMod(
datapath=datapath, match=match, cookie=0,
command=ofproto.OFPFC_DELETE)
datapath.send_msg(mod)
def _del_flow_entry_v1_2(self, datapath, in_port, dst, src=None):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch(
eth_type=ether.ETH_TYPE_IP, in_port=in_port, ipv4_dst=dst)
if src is not None:
match.append_field(ofproto.OXM_OF_IPV4_SRC, src)
mod = parser.OFPFlowMod(
datapath=datapath, command=ofproto.OFPFC_DELETE,
out_port=ofproto.OFPP_ANY, out_group=ofproto.OFPG_ANY,
match=match)
datapath.send_msg(mod)
def _del_flow_entry(self, datapath, in_port, dst, src=None):
"""remove a flow entry."""
del_flow = self._del_flow_func.get(datapath.ofproto.OFP_VERSION)
assert del_flow
del_flow(datapath, in_port, dst, src)
def _do_packet_out(self, datapath, data, in_port, actions):
"""send a packet."""
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
out = parser.OFPPacketOut(
datapath=datapath, buffer_id=ofproto.OFP_NO_BUFFER,
data=data, in_port=in_port, actions=actions)
datapath.send_msg(out)
# -------------------------------------------------------------------
# PROTECTED METHODS ( OTHERS )
# -------------------------------------------------------------------
def _ipv4_text_to_int(self, ip_text):
"""convert ip v4 string to integer."""
if ip_text is None:
return None
assert isinstance(ip_text, str)
return struct.unpack('!I', addrconv.ipv4.text_to_bin(ip_text))[0]
class IgmpQuerier(IgmpBase):
    """IGMP querier emulation class library.
    this querier is a simplified implementation, and is not based on RFC,
    for example as following points:
    - ignore some constant values
    - does not send a specific QUERY in response to LEAVE
    - and so on
    """
    # -------------------------------------------------------------------
    # PUBLIC METHODS
    # -------------------------------------------------------------------
    def __init__(self):
        """initialization."""
        super(IgmpQuerier, self).__init__()
        self.name = "IgmpQuerier"
        self.logger = logging.getLogger(self.name)
        # dpid/server_port stay None until set_querier_mode() is called.
        self.dpid = None
        self.server_port = None
        # set by start_loop()/stop_loop() from the StateChange handler.
        self._datapath = None
        self._querier_thread = None
        # the structure of self._mcast
        #
        # +-------+------------------+
        # | group | port: True/False |
        # |       +------------------+
        # |       | ...              |
        # +-------+------------------+
        # | ...                      |
        # +--------------------------+
        #
        # group       multicast address.
        # port        a port number which connect to the group member.
        #             the value indicates that whether a flow entry
        #             was registered (i.e. a REPORT was seen since the
        #             last general QUERY).
        self._mcast = {}
        self._set_logger()
    def set_querier_mode(self, dpid, server_port):
        """set the datapath to work as a querier. note that you can set
        up only the one querier. when you called this method several
        times, only the last one becomes effective."""
        self.dpid = dpid
        self.server_port = server_port
        if self._querier_thread:
            # a previous querier loop is running; kill it. the loop is
            # restarted by the next StateChange of the new datapath.
            hub.kill(self._querier_thread)
            self._querier_thread = None
    def packet_in_handler(self, req_igmp, msg):
        """the process when the querier received IGMP."""
        ofproto = msg.datapath.ofproto
        # OpenFlow 1.0 carries in_port on the message itself; later
        # versions put it in the match.
        if ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
            in_port = msg.in_port
        else:
            in_port = msg.match['in_port']
        if (igmp.IGMP_TYPE_REPORT_V1 == req_igmp.msgtype or
                igmp.IGMP_TYPE_REPORT_V2 == req_igmp.msgtype):
            self._do_report(req_igmp, in_port, msg)
        elif igmp.IGMP_TYPE_LEAVE == req_igmp.msgtype:
            self._do_leave(req_igmp, in_port, msg)
    def start_loop(self, datapath):
        """start QUERY thread."""
        self._datapath = datapath
        self._querier_thread = hub.spawn(self._send_query)
        self.logger.info("started a querier.")
    def stop_loop(self):
        """stop QUERY thread."""
        hub.kill(self._querier_thread)
        self._querier_thread = None
        self._datapath = None
        self.logger.info("stopped a querier.")
    # -------------------------------------------------------------------
    # PRIVATE METHODS ( RELATED TO IGMP )
    # -------------------------------------------------------------------
    def _send_query(self):
        """ send a QUERY message periodically.

        Runs forever in a hub greenthread: floods a general QUERY,
        waits for the response interval, then prunes every port (and
        group) that did not answer with a REPORT.
        """
        timeout = 60
        ofproto = self._datapath.ofproto
        parser = self._datapath.ofproto_parser
        if ofproto_v1_0.OFP_VERSION == ofproto.OFP_VERSION:
            send_port = ofproto.OFPP_NONE
        else:
            send_port = ofproto.OFPP_ANY
        # create a general query. the packet is built once and reused
        # on every iteration of the loop below.
        res_igmp = igmp.igmp(
            msgtype=igmp.IGMP_TYPE_QUERY,
            maxresp=igmp.QUERY_RESPONSE_INTERVAL * 10,
            csum=0,
            address='0.0.0.0')
        res_ipv4 = ipv4.ipv4(
            total_length=len(ipv4.ipv4()) + len(res_igmp),
            proto=inet.IPPROTO_IGMP, ttl=1,
            src='0.0.0.0',
            dst=igmp.MULTICAST_IP_ALL_HOST)
        res_ether = ethernet.ethernet(
            dst=igmp.MULTICAST_MAC_ALL_HOST,
            src=self._datapath.ports[ofproto.OFPP_LOCAL].hw_addr,
            ethertype=ether.ETH_TYPE_IP)
        res_pkt = packet.Packet()
        res_pkt.add_protocol(res_ether)
        res_pkt.add_protocol(res_ipv4)
        res_pkt.add_protocol(res_igmp)
        res_pkt.serialize()
        flood = [parser.OFPActionOutput(ofproto.OFPP_FLOOD)]
        while True:
            # reset reply status. _do_report() flips entries back to
            # True while we sleep below.
            for status in self._mcast.values():
                for port in status.keys():
                    status[port] = False
            # send a general query to the host that sent this message.
            self._do_packet_out(
                self._datapath, res_pkt.data, send_port, flood)
            hub.sleep(igmp.QUERY_RESPONSE_INTERVAL)
            # QUERY timeout expired. ports still False did not report.
            del_groups = []
            for group, status in self._mcast.items():
                del_ports = []
                actions = []
                for port in status.keys():
                    if not status[port]:
                        del_ports.append(port)
                    else:
                        actions.append(parser.OFPActionOutput(port))
                # some members remain: narrow the flow entry to them.
                if len(actions) and len(del_ports):
                    self._set_flow_entry(
                        self._datapath, actions, self.server_port, group)
                # no member remains: drop the whole group.
                if not len(actions):
                    self._del_flow_entry(
                        self._datapath, self.server_port, group)
                    del_groups.append(group)
                if len(del_ports):
                    for port in del_ports:
                        self._del_flow_entry(self._datapath, port, group)
                for port in del_ports:
                    del status[port]
            for group in del_groups:
                del self._mcast[group]
            rest_time = timeout - igmp.QUERY_RESPONSE_INTERVAL
            hub.sleep(rest_time)
    def _do_report(self, report, in_port, msg):
        """the process when the querier received a REPORT message."""
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        if ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
            size = 65535
        else:
            size = ofproto.OFPCML_MAX
        # flows only need updating when this port is new to the group;
        # a refresh of an already-known port just marks it alive.
        update = False
        self._mcast.setdefault(report.address, {})
        if in_port not in self._mcast[report.address]:
            update = True
        self._mcast[report.address][in_port] = True
        if update:
            actions = []
            for port in self._mcast[report.address]:
                actions.append(parser.OFPActionOutput(port))
            # server -> members flow, plus a member -> controller flow
            # so later REPORTs keep reaching this handler.
            self._set_flow_entry(
                datapath, actions, self.server_port, report.address)
            self._set_flow_entry(
                datapath,
                [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER, size)],
                in_port, report.address)
    def _do_leave(self, leave, in_port, msg):
        """the process when the querier received a LEAVE message."""
        datapath = msg.datapath
        parser = datapath.ofproto_parser
        self._mcast.setdefault(leave.address, {})
        if in_port in self._mcast[leave.address]:
            self._del_flow_entry(
                datapath, in_port, leave.address)
            del self._mcast[leave.address][in_port]
            actions = []
            for port in self._mcast[leave.address]:
                actions.append(parser.OFPActionOutput(port))
            if len(actions):
                # remaining members: rewrite the flow without this port.
                self._set_flow_entry(
                    datapath, actions, self.server_port, leave.address)
            else:
                # last member left: remove the group's flow entirely.
                self._del_flow_entry(
                    datapath, self.server_port, leave.address)
    # -------------------------------------------------------------------
    # PRIVATE METHODS ( OTHERS )
    # -------------------------------------------------------------------
    def _set_logger(self):
        """change log format."""
        self.logger.propagate = False
        hdl = logging.StreamHandler()
        fmt_str = '[querier][%(levelname)s] %(message)s'
        hdl.setFormatter(logging.Formatter(fmt_str))
        self.logger.addHandler(hdl)
class IgmpSnooper(IgmpBase):
    """IGMP snooping class library.

    Watches IGMP traffic to learn which switch ports host group
    members, installs matching multicast flow entries, and reports
    membership changes through the event callback given at
    construction time.
    """
    # -------------------------------------------------------------------
    # PUBLIC METHODS
    # -------------------------------------------------------------------
    def __init__(self, send_event):
        """initialization."""
        super(IgmpSnooper, self).__init__()
        self.name = "IgmpSnooper"
        self.logger = logging.getLogger(self.name)
        # callback used to raise EventMulticastGroupStateChanged.
        self._send_event = send_event
        # the structure of self._to_querier
        #
        # +------+--------------+
        # | dpid | 'port': port |
        # |      +--------------+
        # |      | 'ip': ip     |
        # |      +--------------+
        # |      | 'mac': mac   |
        # +------+--------------+
        # | ...                 |
        # +---------------------+
        #
        # dpid        datapath id.
        # port        a port number which connect to the querier.
        # ip          IP address of the querier.
        # mac         MAC address of the querier.
        self._to_querier = {}
        # the structure of self._to_hosts
        #
        # +------+-------+---------------------------------+
        # | dpid | group | 'replied': True/False           |
        # |      |       +---------------------------------+
        # |      |       | 'leave': leave                  |
        # |      |       +-----------+--------+------------+
        # |      |       | 'ports'   | portno | 'out': out |
        # |      |       |           |        +------------+
        # |      |       |           |        | 'in': in   |
        # |      |       |           +--------+------------+
        # |      |       |           | ...                 |
        # |      +-------+-----------+---------------------+
        # |      | ...                                     |
        # +------+-----------------------------------------+
        # | ...                                            |
        # +------------------------------------------------+
        #
        # dpid        datapath id.
        # group       multicast address.
        # replied     the value indicates whether a REPORT message was
        #             replied.
        # leave       a LEAVE message.
        # portno      a port number which has joined to the multicast
        #             group.
        # out         the value indicates whether a flow entry for the
        #             packet outputted to the port was registered.
        # in          the value indicates whether a flow entry for the
        #             packet inputted from the port was registered.
        self._to_hosts = {}
        self._set_logger()
    def packet_in_handler(self, req_pkt, req_igmp, msg):
        """the process when the snooper received IGMP.

        Dispatches by IGMP message type; v3 REPORTs and unknown types
        are simply flooded.
        """
        dpid = msg.datapath.id
        ofproto = msg.datapath.ofproto
        # OF1.0 carries in_port on the message; later versions use match.
        if ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
            in_port = msg.in_port
        else:
            in_port = msg.match['in_port']
        log = "SW=%s PORT=%d IGMP received. " % (
            dpid_to_str(dpid), in_port)
        self.logger.debug(str(req_igmp))
        if igmp.IGMP_TYPE_QUERY == req_igmp.msgtype:
            self.logger.info(log + "[QUERY]")
            (req_ipv4, ) = req_pkt.get_protocols(ipv4.ipv4)
            (req_eth, ) = req_pkt.get_protocols(ethernet.ethernet)
            self._do_query(req_igmp, req_ipv4, req_eth, in_port, msg)
        elif (igmp.IGMP_TYPE_REPORT_V1 == req_igmp.msgtype or
                igmp.IGMP_TYPE_REPORT_V2 == req_igmp.msgtype):
            self.logger.info(log + "[REPORT]")
            self._do_report(req_igmp, in_port, msg)
        elif igmp.IGMP_TYPE_LEAVE == req_igmp.msgtype:
            self.logger.info(log + "[LEAVE]")
            self._do_leave(req_igmp, in_port, msg)
        elif igmp.IGMP_TYPE_REPORT_V3 == req_igmp.msgtype:
            self.logger.info(log + "V3 is not supported yet.")
            self._do_flood(in_port, msg)
        else:
            self.logger.info(log + "[unknown type:%d]",
                             req_igmp.msgtype)
            self._do_flood(in_port, msg)
    # -------------------------------------------------------------------
    # PRIVATE METHODS ( RELATED TO IGMP )
    # -------------------------------------------------------------------
    def _do_query(self, query, iph, eth, in_port, msg):
        """the process when the snooper received a QUERY message."""
        datapath = msg.datapath
        dpid = datapath.id
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        # learn the querier.
        self._to_querier[dpid] = {
            'port': in_port,
            'ip': iph.src,
            'mac': eth.src
        }
        # set the timeout time. maxresp is in tenths of a second.
        timeout = igmp.QUERY_RESPONSE_INTERVAL
        if query.maxresp:
            timeout = query.maxresp / 10
        self._to_hosts.setdefault(dpid, {})
        if query.address == '0.0.0.0':
            # general query. reset all reply status.
            for group in self._to_hosts[dpid].values():
                group['replied'] = False
                group['leave'] = None
        else:
            # specific query. reset the reply status of the specific
            # group.
            group = self._to_hosts[dpid].get(query.address)
            if group:
                group['replied'] = False
                group['leave'] = None
        # forward the query to all hosts on the switch.
        actions = [parser.OFPActionOutput(ofproto.OFPP_FLOOD)]
        self._do_packet_out(
            datapath, msg.data, in_port, actions)
        # wait for REPORT messages.
        hub.spawn(self._do_timeout_for_query, timeout, datapath)
    def _do_report(self, report, in_port, msg):
        """the process when the snooper received a REPORT message."""
        datapath = msg.datapath
        dpid = datapath.id
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        if ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
            size = 65535
        else:
            size = ofproto.OFPCML_MAX
        # check whether the querier port has been specified.
        outport = None
        value = self._to_querier.get(dpid)
        if value:
            outport = value['port']
        # send a event when the multicast group address is new.
        self._to_hosts.setdefault(dpid, {})
        if not self._to_hosts[dpid].get(report.address):
            self._send_event(
                EventMulticastGroupStateChanged(
                    MG_GROUP_ADDED, report.address, outport, []))
            self._to_hosts[dpid].setdefault(
                report.address,
                {'replied': False, 'leave': None, 'ports': {}})
        # set a flow entry from a host to the controller when
        # a host sent a REPORT message.
        if not self._to_hosts[dpid][report.address]['ports'].get(
                in_port):
            self._to_hosts[dpid][report.address]['ports'][
                in_port] = {'out': False, 'in': False}
            self._set_flow_entry(
                datapath,
                [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER, size)],
                in_port, report.address)
        if not self._to_hosts[dpid][report.address]['ports'][
                in_port]['out']:
            self._to_hosts[dpid][report.address]['ports'][
                in_port]['out'] = True
        if not outport:
            # without a known querier there is nowhere to anchor the
            # server->hosts flow; bail out after recording the member.
            self.logger.info("no querier exists.")
            return
        # set a flow entry from a multicast server to hosts.
        if not self._to_hosts[dpid][report.address]['ports'][
                in_port]['in']:
            actions = []
            ports = []
            for port in self._to_hosts[dpid][report.address]['ports']:
                actions.append(parser.OFPActionOutput(port))
                ports.append(port)
            self._send_event(
                EventMulticastGroupStateChanged(
                    MG_MEMBER_CHANGED, report.address, outport, ports))
            self._set_flow_entry(
                datapath, actions, outport, report.address)
            self._to_hosts[dpid][report.address]['ports'][
                in_port]['in'] = True
        # send a REPORT message to the querier if this message arrived
        # first after a QUERY message was sent.
        if not self._to_hosts[dpid][report.address]['replied']:
            actions = [parser.OFPActionOutput(outport, size)]
            self._do_packet_out(datapath, msg.data, in_port, actions)
            self._to_hosts[dpid][report.address]['replied'] = True
    def _do_leave(self, leave, in_port, msg):
        """the process when the snooper received a LEAVE message."""
        datapath = msg.datapath
        dpid = datapath.id
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        # check whether the querier port has been specified.
        if not self._to_querier.get(dpid):
            self.logger.info("no querier exists.")
            return
        # save this LEAVE message and reset the condition of the port
        # that received this message.
        self._to_hosts.setdefault(dpid, {})
        self._to_hosts[dpid].setdefault(
            leave.address,
            {'replied': False, 'leave': None, 'ports': {}})
        self._to_hosts[dpid][leave.address]['leave'] = msg
        self._to_hosts[dpid][leave.address]['ports'][in_port] = {
            'out': False, 'in': False}
        # create a specific query, spoofing the learned querier's
        # addresses, to check for remaining members on this port.
        timeout = igmp.LAST_MEMBER_QUERY_INTERVAL
        res_igmp = igmp.igmp(
            msgtype=igmp.IGMP_TYPE_QUERY,
            maxresp=timeout * 10,
            csum=0,
            address=leave.address)
        res_ipv4 = ipv4.ipv4(
            total_length=len(ipv4.ipv4()) + len(res_igmp),
            proto=inet.IPPROTO_IGMP, ttl=1,
            src=self._to_querier[dpid]['ip'],
            dst=igmp.MULTICAST_IP_ALL_HOST)
        res_ether = ethernet.ethernet(
            dst=igmp.MULTICAST_MAC_ALL_HOST,
            src=self._to_querier[dpid]['mac'],
            ethertype=ether.ETH_TYPE_IP)
        res_pkt = packet.Packet()
        res_pkt.add_protocol(res_ether)
        res_pkt.add_protocol(res_ipv4)
        res_pkt.add_protocol(res_igmp)
        res_pkt.serialize()
        # send a specific query to the host that sent this message.
        actions = [parser.OFPActionOutput(ofproto.OFPP_IN_PORT)]
        self._do_packet_out(datapath, res_pkt.data, in_port, actions)
        # wait for REPORT messages.
        hub.spawn(self._do_timeout_for_leave, timeout, datapath,
                  leave.address, in_port)
    def _do_flood(self, in_port, msg):
        """the process when the snooper received a message of the
        outside for processing. """
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        actions = [parser.OFPActionOutput(ofproto.OFPP_FLOOD)]
        self._do_packet_out(datapath, msg.data, in_port, actions)
    def _do_timeout_for_query(self, timeout, datapath):
        """the process when the QUERY from the querier timeout expired."""
        dpid = datapath.id
        hub.sleep(timeout)
        outport = self._to_querier[dpid]['port']
        remove_dsts = []
        for dst in self._to_hosts[dpid]:
            if not self._to_hosts[dpid][dst]['replied']:
                # if no REPORT message sent from any members of
                # the group, remove flow entries about the group and
                # send a LEAVE message if exists.
                self._remove_multicast_group(datapath, outport, dst)
                remove_dsts.append(dst)
        for dst in remove_dsts:
            del self._to_hosts[dpid][dst]
    def _do_timeout_for_leave(self, timeout, datapath, dst, in_port):
        """the process when the QUERY from the switch timeout expired."""
        parser = datapath.ofproto_parser
        dpid = datapath.id
        hub.sleep(timeout)
        outport = self._to_querier[dpid]['port']
        # a REPORT arrived while waiting; the port is still a member.
        if self._to_hosts[dpid][dst]['ports'][in_port]['out']:
            return
        del self._to_hosts[dpid][dst]['ports'][in_port]
        self._del_flow_entry(datapath, in_port, dst)
        actions = []
        ports = []
        for port in self._to_hosts[dpid][dst]['ports']:
            actions.append(parser.OFPActionOutput(port))
            ports.append(port)
        if len(actions):
            # other members remain; rewrite the flow without this port.
            self._send_event(
                EventMulticastGroupStateChanged(
                    MG_MEMBER_CHANGED, dst, outport, ports))
            self._set_flow_entry(
                datapath, actions, outport, dst)
            self._to_hosts[dpid][dst]['leave'] = None
        else:
            self._remove_multicast_group(datapath, outport, dst)
            del self._to_hosts[dpid][dst]
    def _remove_multicast_group(self, datapath, outport, dst):
        """remove flow entries about the group and send a LEAVE message
        if exists."""
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        dpid = datapath.id
        self._send_event(
            EventMulticastGroupStateChanged(
                MG_GROUP_REMOVED, dst, outport, []))
        self._del_flow_entry(datapath, outport, dst)
        for port in self._to_hosts[dpid][dst]['ports']:
            self._del_flow_entry(datapath, port, dst)
        leave = self._to_hosts[dpid][dst]['leave']
        if leave:
            # relay the pending LEAVE to the querier port.
            if ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
                in_port = leave.in_port
            else:
                in_port = leave.match['in_port']
            actions = [parser.OFPActionOutput(outport)]
            self._do_packet_out(
                datapath, leave.data, in_port, actions)
    # -------------------------------------------------------------------
    # PRIVATE METHODS ( OTHERS )
    # -------------------------------------------------------------------
    def _set_logger(self):
        """change log format."""
        self.logger.propagate = False
        hdl = logging.StreamHandler()
        fmt_str = '[snoop][%(levelname)s] %(message)s'
        hdl.setFormatter(logging.Formatter(fmt_str))
        self.logger.addHandler(hdl)
| |
import warnings
from unittest.mock import patch
import pytest
import geopy.geocoders
from geopy.exc import ConfigurationError
from geopy.geocoders import Nominatim
from geopy.point import Point
from test.geocoders.util import BaseTestGeocoder
class BaseTestNominatim(BaseTestGeocoder):
    """Shared live-service test cases for Nominatim-based geocoders.

    Subclasses provide ``make_geocoder``. Expected coordinates come
    from live OSM data, so assertions use a tolerance (``delta``).
    """
    # Common test cases for Nominatim-based geocoders.
    # Assumes that Nominatim uses the OSM data.
    delta = 0.04
    async def test_geocode(self):
        await self.geocode_run(
            {"query": "435 north michigan ave, chicago il 60611 usa"},
            {"latitude": 41.890, "longitude": -87.624},
        )
    async def test_geocode_empty_result(self):
        # Nonsense query must yield no result rather than an error.
        await self.geocode_run(
            {"query": "dsadjkasdjasd"},
            {},
            expect_failure=True,
        )
    async def test_reverse_empty_result(self):
        await self.reverse_run(
            # Empty area in the middle of the Atlantic Ocean.
            {"query": Point(0.05, -0.15)},
            {},
            expect_failure=True,
        )
    async def test_limit(self):
        with pytest.raises(ValueError):  # non-positive limit
            await self.geocode_run(
                {"query": "does not matter", "limit": 0, "exactly_one": False},
                {}
            )
        result = await self.geocode_run(
            {"query": "second street", "limit": 4, "exactly_one": False},
            {}
        )
        assert len(result) >= 3  # PickPoint sometimes returns 3
        assert 4 >= len(result)
    @patch.object(geopy.geocoders.options, 'default_user_agent',
                  'mocked_user_agent/0.0.0')
    def test_user_agent_default(self):
        # user_agent=None must fall back to the global default.
        geocoder = self.make_geocoder(user_agent=None)
        assert geocoder.headers['User-Agent'] == 'mocked_user_agent/0.0.0'
    async def test_user_agent_custom(self):
        geocoder = self.make_geocoder(
            user_agent='my_test_application'
        )
        assert geocoder.headers['User-Agent'] == 'my_test_application'
    async def test_reverse(self):
        location = await self.reverse_run(
            {"query": Point(40.75376406311989, -73.98489005863667)},
            {"latitude": 40.753, "longitude": -73.984}
        )
        assert "New York" in location.address
    async def test_structured_query(self):
        # Dict queries are sent as Nominatim structured queries.
        await self.geocode_run(
            {"query": {"country": "us", "city": "moscow",
                       "state": "idaho"}},
            {"latitude": 46.7323875, "longitude": -117.0001651},
        )
    async def test_city_district_with_dict_query(self):
        query = {'postalcode': 10117}
        result = await self.geocode_run(
            {"query": query, "addressdetails": True, "country_codes": "DE"},
            {},
        )
        try:
            # For some queries `city_district` might be missing in the response.
            # For this specific query on OpenMapQuest the key is also missing.
            city_district = result.raw['address']['city_district']
        except KeyError:
            # MapQuest
            city_district = result.raw['address']['suburb']
        assert city_district == 'Mitte'
    async def test_geocode_language_parameter(self):
        query = "Mohrenstrasse Berlin"
        result_geocode = await self.geocode_run(
            {"query": query, "addressdetails": True,
             "language": "de"},
            {},
        )
        assert result_geocode.raw['address']['country'] == "Deutschland"
        result_geocode = await self.geocode_run(
            {"query": query, "addressdetails": True,
             "language": "en"},
            {},
        )
        assert result_geocode.raw['address']['country'] == "Germany"
    async def test_reverse_language_parameter(self):
        query = "52.51693903613385, 13.3859332733135"
        result_reverse_de = await self.reverse_run(
            {"query": query, "language": "de"},
            {},
        )
        assert result_reverse_de.raw['address']['country'] == "Deutschland"
        result_reverse_en = await self.reverse_run(
            {"query": query, "language": "en"},
            {},
        )
        # have had a change in the exact authority name
        assert "Germany" in result_reverse_en.raw['address']['country']
    async def test_geocode_geometry_wkt(self):
        result_geocode = await self.geocode_run(
            {"query": "Halensee,Berlin", "geometry": 'WKT'},
            {},
        )
        assert result_geocode.raw['geotext'].startswith('POLYGON((')
    async def test_geocode_geometry_svg(self):
        result_geocode = await self.geocode_run(
            {"query": "Halensee,Berlin", "geometry": 'svg'},
            {},
        )
        assert result_geocode.raw['svg'].startswith('M 13.')
    async def test_geocode_geometry_kml(self):
        result_geocode = await self.geocode_run(
            {"query": "Halensee,Berlin", "geometry": 'kml'},
            {},
        )
        assert result_geocode.raw['geokml'].startswith('<Polygon>')
    async def test_geocode_geometry_geojson(self):
        result_geocode = await self.geocode_run(
            {"query": "Halensee,Berlin", "geometry": 'geojson'},
            {},
        )
        assert result_geocode.raw['geojson'].get('type') == 'Polygon'
    async def test_missing_reverse_details(self):
        query = (46.46131, 6.84311)
        res = await self.reverse_run(
            {"query": query},
            {}
        )
        assert "address" in res.raw
        # addressdetails=False must strip the address block.
        res = await self.reverse_run(
            {"query": query, "addressdetails": False},
            {},
        )
        assert 'address' not in res.raw
    async def test_viewbox(self):
        # Without a viewbox the result is outside the London box below;
        # with one, the in-box match is preferred.
        res = await self.geocode_run(
            {"query": "Maple Street"},
            {},
        )
        assert not (50 <= res.latitude <= 52)
        assert not (-0.15 <= res.longitude <= -0.11)
        for viewbox in [
            ((52, -0.11), (50, -0.15)),
            [Point(52, -0.11), Point(50, -0.15)],
            (("52", "-0.11"), ("50", "-0.15"))
        ]:
            await self.geocode_run(
                {"query": "Maple Street", "viewbox": viewbox},
                {"latitude": 51.5223513, "longitude": -0.1382104}
            )
    async def test_bounded(self):
        bb = (Point('56.588456', '84.719353'), Point('56.437293', '85.296822'))
        # Cyrillic: "стройтель томск" (a construction firm in Tomsk).
        query = (
            '\u0441\u0442\u0440\u043e\u0438\u0442\u0435\u043b\u044c '
            '\u0442\u043e\u043c\u0441\u043a'
        )
        await self.geocode_run(
            {"query": query, "viewbox": bb},
            {"latitude": 56.4129459, "longitude": 84.847831069814},
        )
        # bounded=True restricts results to the viewbox.
        await self.geocode_run(
            {"query": query, "viewbox": bb, "bounded": True},
            {"latitude": 56.4803224, "longitude": 85.0060457653324},
        )
    async def test_extratags(self):
        query = "Statue of Liberty"
        location = await self.geocode_run(
            {"query": query},
            {},
        )
        assert location.raw.get('extratags') is None
        location = await self.geocode_run(
            {"query": query, "extratags": True},
            {},
        )
        # 'wikidata': 'Q9202', 'wikipedia': 'en:Statue of Liberty'
        assert location.raw['extratags']['wikidata'] == 'Q9202'
    async def test_country_codes_moscow(self):
        await self.geocode_run(
            {"query": "moscow", "country_codes": "RU"},
            {"latitude": 55.7507178, "longitude": 37.6176606,
             "delta": 0.3},
        )
        location = await self.geocode_run(
            {"query": "moscow", "country_codes": "US"},
            # There are two possible results:
            # Moscow Idaho: 46.7323875,-117.0001651
            # Moscow Penn: 41.3367497,-75.5185191
            {},
        )
        # We don't care which Moscow is returned, unless it's
        # the Russian one. We can sort this out by asserting
        # the longitudes. The Russian Moscow has positive longitudes.
        assert -119 < location.longitude
        assert location.longitude < -70
    async def test_country_codes_str(self):
        await self.geocode_run(
            {"query": "kazan",
             "country_codes": 'tr'},
            {"latitude": 40.2317, "longitude": 32.6839, "delta": 2},
        )
    async def test_country_codes_list(self):
        await self.geocode_run(
            {"query": "kazan",
             "country_codes": ['cn', 'tr']},
            {"latitude": 40.2317, "longitude": 32.6839, "delta": 2},
        )
    @pytest.mark.parametrize(
        "payload, expected",
        [
            pytest.param(
                {"query": "mexico", "featuretype": 'country'},
                {"latitude": 22.5000485, "longitude": -100.0000375, "delta": 5.0},
                id="country",
            ),
            pytest.param(
                {"query": "mexico", "featuretype": 'state', "country_codes": "US"},
                {"latitude": 34.5708167, "longitude": -105.993007, "delta": 2.0},
                id="state",
            ),
            pytest.param(
                {"query": "mexico", "featuretype": 'city'},
                {"latitude": 19.4326009, "longitude": -99.1333416, "delta": 2.0},
                id="city",
                marks=pytest.mark.xfail(reason='nominatim responds incorrectly here'),
            ),
            pytest.param(
                {"query": "georgia", "featuretype": 'settlement'},
                {"latitude": 32.3293809, "longitude": -83.1137366, "delta": 2.0},
                id="settlement",
            ),
        ]
    )
    async def test_featuretype_param(self, payload, expected):
        await self.geocode_run(payload, expected)
    async def test_namedetails(self):
        query = "Kyoto, Japan"
        result = await self.geocode_run(
            {"query": query, "namedetails": True},
            {},
        )
        assert 'namedetails' in result.raw
        result = await self.geocode_run(
            {"query": query, "namedetails": False},
            {},
        )
        assert 'namedetails' not in result.raw
    async def test_reverse_zoom_parameter(self):
        # Lower zoom returns a coarser feature (city, not the statue).
        query = "40.689253199999996, -74.04454817144321"
        result_reverse = await self.reverse_run(
            {"query": query, "zoom": 10},
            {},
        )
        assert "New York" in result_reverse.address
        assert "Statue of Liberty" not in result_reverse.address
        result_reverse = await self.reverse_run(
            {"query": query},
            {},
        )
        assert "New York" in result_reverse.address
        assert "Statue of Liberty" in result_reverse.address
class TestNominatim(BaseTestNominatim):
    """Run the shared Nominatim suite against geopy's own Nominatim."""

    @classmethod
    def make_geocoder(cls, **kwargs):
        if 'user_agent' not in kwargs:
            kwargs['user_agent'] = 'geopy-test'
        return Nominatim(**kwargs)

    async def test_default_user_agent_error(self):
        # Constructing without any user_agent must be rejected.
        with pytest.raises(ConfigurationError):
            Nominatim()

    async def test_example_user_agent_error(self):
        # The documentation placeholder value must be rejected as well.
        with pytest.raises(ConfigurationError):
            Nominatim(user_agent="specify_your_app_name_here")

    async def test_custom_user_agent_works(self):
        Nominatim(user_agent='my_application')
        with patch.object(geopy.geocoders.options, 'default_user_agent',
                          'my_application'):
            Nominatim()

    def test_import_deprecated_osm_module(self):
        # The legacy module path still works but warns exactly once.
        with warnings.catch_warnings(record=True) as caught:
            from geopy.geocoders.osm import Nominatim as OsmNominatim
        assert len(caught) == 1
        assert OsmNominatim is Nominatim
| |
# File: CeTestBeds.py ; This file is part of Twister.
# version: 3.005
# Copyright (C) 2012-2014, Luxoft
# Authors:
# Andreea Proca <aproca@luxoft.com>
# Andrei Costachi <acostachi@luxoft.com>
# Cristi Constantin <crconstantin@luxoft.com>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Module to manage the test beds
'''
import os
import sys
import copy
import thread
try:
import simplejson as json
except:
import json
import cherrypy
from lxml import etree
from binascii import hexlify
from cherrypy import _cptools
# Resolve the Twister installation root; the server cannot run without it.
TWISTER_PATH = os.getenv('TWISTER_PATH')
if not TWISTER_PATH:
    print 'TWISTER_PATH environment variable is not set! Exiting!'
    exit(1)
# Make the Twister packages importable regardless of the working directory.
if TWISTER_PATH not in sys.path:
    sys.path.append(TWISTER_PATH)
from common.tsclogging import logFull, logDebug, logWarning, logError
#from common.helpers import user_info
from server.CeCommonAllocator import CommonAllocator
# Skeleton of an empty test-bed tree: a root resource with no children.
CONSTANT_DICTIONARY = {'version': 0, 'name': '/', 'id':"", 'path' : [], 'meta': {}, 'children': {}}
def xml_to_res(xml, gparams, skip_header=False):
    """
    Import xml file to TB.

    Walk an XML test-bed description and build the equivalent nested
    dictionary. `gparams` is the dictionary to fill; unless
    `skip_header` is True it is replaced by a fresh root dictionary
    built from the top-level <path>/<meta>/<id>/<version> elements.
    Returns the resulting dictionary.
    """
    def _walk(node, children_dict):
        """Recursively convert every <folder> under `node` to a dict."""
        for folder in node.findall('folder'):
            path_elem = folder.find('path')
            if path_elem is None:
                entry = {'meta': {}, 'id': "", 'children': {}}
            else:
                parts = [q for q in path_elem.text.split('/') if q]
                entry = {'path': parts, 'meta': {}, 'id': "", 'children': {}}
            # Populate META properties.
            meta_elem = folder.find('meta')
            if meta_elem is not None:
                for param in meta_elem.findall('param'):
                    name_elem = param.find('name')
                    if name_elem is None:
                        continue
                    value_elem = param.find('value')
                    if value_elem is not None and value_elem.text is not None:
                        entry['meta'][name_elem.text] = value_elem.text
                    else:
                        entry['meta'][name_elem.text] = ''
            # Use the ID stored in the file when present, otherwise
            # generate a short random one.
            id_elem = folder.find('id')
            id_value = None if id_elem is None else id_elem.find('value')
            if id_value is not None and id_value.text is not None:
                entry['id'] = id_value.text
            else:
                entry['id'] = hexlify(os.urandom(5))
            fname = folder.find('fname').text
            children_dict[fname] = entry
            # Recurse into this folder's own <folder> children.
            _walk(folder, entry['children'])

    # Root-level information (path, meta, id, version) comes first;
    # version is kept only when present in the XML.
    if not skip_header:
        root = {'path': [], 'meta': {}, 'id': "", 'children': {}}
        root_path_text = xml.find('path').text
        if root_path_text:
            root['path'].extend(q for q in root_path_text.split('/') if q)
        for param in xml.find('meta'):
            key = param.find('name').text
            val = param.find('value').text
            root['meta'][key] = val if val else ''
        root['id'] = xml.find('id').text or ""
        version_elem = xml.find('version')
        if version_elem is not None and version_elem.text is not None:
            root['version'] = int(version_elem.text)
        gparams = root
    # The rest of the document is converted recursively.
    _walk(xml, gparams['children'])
    return gparams
def res_to_xml(parent_node, xml, skip_header=False):
    """
    Export a test-bed dictionary tree to XML.

    :param parent_node: dict node with 'path', 'meta', 'id', 'children'
                        (and optionally 'version') keys
    :param xml: etree element that receives the generated sub-elements
    :param skip_header: when False, also emit the root-level
                        path/meta/id/version elements before the folders
    :return: `xml` on success, False for an empty node or a leaf
    """
    # The node is valid ?
    if not parent_node:
        return False
    # if we are at root level, we need to get path, meta, id and version fields
    if not skip_header:
        # path is a list with 0 or 1 elements
        path = etree.SubElement(xml, 'path')
        if parent_node.get('path') is not None and len(parent_node.get('path')) == 1:
            path.text = '/'.join(parent_node.get('path'))
        else:
            path.text = ''
        meta = etree.SubElement(xml, 'meta')
        # meta is a dictionary; items() replaces the Python-2-only iteritems()
        for k_val, v_val in parent_node.get('meta').items():
            tag = etree.SubElement(meta, 'param')
            prop = etree.SubElement(tag, 'name')
            prop.text = str(k_val)
            val = etree.SubElement(tag, 'value')
            val.text = str(v_val) if v_val else ''
            typ = etree.SubElement(tag, 'type')
            typ.text = 'string'
            etree.SubElement(tag, 'desc')
        tb_id = etree.SubElement(xml, 'id')
        tb_id.text = parent_node.get('id')
        # add version only if it exists in dictionary; the SUT
        # files don't have version
        if parent_node.get('version') is not None:
            version = etree.SubElement(xml, 'version')
            version.text = str(parent_node.get('version'))
    # This node has children ?
    if not parent_node.get('children'):
        return False
    for node in sorted(parent_node['children'].keys()):
        c_nd = dict(parent_node['children'][node])
        # Create empty folder
        folder = etree.SubElement(xml, 'folder')
        fname = etree.SubElement(folder, 'fname')
        fname.text = node
        etree.SubElement(folder, 'fdesc')
        # get the path if exists
        if c_nd.get('path'):
            path = etree.SubElement(folder, 'path')
            path.text = '/'.join(c_nd.get('path'))
        # get meta information; isinstance() replaces the `type(...) is dict` check
        meta = etree.SubElement(folder, 'meta')
        if isinstance(c_nd['meta'], dict):
            for k_var, v_var in c_nd['meta'].items():
                tag = etree.SubElement(meta, 'param')
                prop = etree.SubElement(tag, 'name')
                prop.text = str(k_var)
                val = etree.SubElement(tag, 'value')
                val.text = str(v_var) if v_var else ''
                typ = etree.SubElement(tag, 'type')
                typ.text = 'string'
                etree.SubElement(tag, 'desc')
        # get the id
        if c_nd.get('id'):
            tag = etree.SubElement(folder, 'id')
            val = etree.SubElement(tag, 'value')
            val.text = c_nd['id']
            typ = etree.SubElement(tag, 'type')
            typ.text = 'string'
            etree.SubElement(tag, 'desc')
        # Recurse into the children, skipping the root-level header.
        res_to_xml(c_nd, folder, True)
    return xml
class TestBeds(_cptools.XMLRPCController, CommonAllocator):
    """
    Basic operations for TestBeds.
    """

    def __init__(self, project):
        """
        Initialise the test-bed controller for `project` and load the
        resources file from disk.
        """
        self.project = project
        self.type = 'tb'
        # BUGFIX: start from a private copy, so later in-place updates of
        # self.resources never mutate the shared CONSTANT_DICTIONARY.
        self.resources = copy.deepcopy(CONSTANT_DICTIONARY)
        self.reservedResources = {}
        self.lockedResources = {}
        self.acc_lock = thread.allocate_lock()   # Task change lock
        self.ren_lock = thread.allocate_lock()   # Rename lock
        self.imp_lock = thread.allocate_lock()   # Import lock
        self.save_lock = thread.allocate_lock()  # Save lock
        self.load_lock = thread.allocate_lock()  # Load lock (comment fixed; was a copy-paste of "Save lock")
        self.res_file = '{}/config/resources.json'.format(TWISTER_PATH)
        self._loaded_users = {}
        self.load_tb(verbose=True)
def load_tb(self, verbose=False):
    """
    (Re)load the test-bed resources from the JSON file on disk.

    Older resources files may lack the 'path' field; those entries are
    fixed up in memory and, if anything changed, written back to disk.

    :param verbose: when True, log a message on successful load
    :return: the resources dict, or an '*ERROR* ...' string when the
             repaired structure could not be saved
    """
    logDebug('CeTestBeds:load_tb {}'.format(verbose))
    with self.load_lock:
        if not self.resources.get('children'):
            # Use a copy so the shared default structure is not mutated.
            self.resources = copy.deepcopy(CONSTANT_DICTIONARY)
        # try to load test bed resources file
        try:
            # `with` guarantees the handle is closed even on a parse error
            with open(self.res_file, 'r') as f_p:
                self.resources = json.load(f_p)
            if verbose:
                logDebug('TBs loaded successfully.')
        except Exception as exp_err:
            logError('Error loading TBs! {}'.format(exp_err))
        is_res_modified = False
        # make older resources files that don't have 'path' compatible
        for res in self.resources.get('children'):
            self.resources['children'][res]['path'] = [res]
            modified = self.fix_path(self.resources['children'][res], [res])
            if modified:
                is_res_modified = True
        # save the resources updated (with path field) for later usage
        if is_res_modified:
            issaved = self.save_tb()
            if isinstance(issaved, str) and issaved.startswith('*ERROR* '):
                # BUGFIX: the original formatted the message with an
                # undefined `user_info`, raising NameError instead of
                # reporting the save failure.
                msg = "We could not save the repaired TB file."
                logDebug(msg)
                return "*ERROR* " + msg
        return self.resources
def save_tb(self, props={}):
    """
    Persist the in-memory resources to the JSON file on disk, bumping
    the 'version' counter first.

    :param props: unused here; kept for interface compatibility
    :return: True on success, an '*ERROR* ...' string otherwise
    """
    logFull('CeTestBeds:_save {}'.format(props))
    errors = []
    with self.save_lock:
        # Every successful or attempted save increments the version.
        self.resources['version'] = self.resources.get('version', 0) + 1
        try:
            logDebug('Saving test bed file.')
            with open(self.res_file, 'w') as res_fh:
                json.dump(self.resources, res_fh, indent=4)
        except Exception as exp_err:
            errors.append(exp_err)
        if errors:
            logError(errors)
            return '*ERROR* ' + str(errors)
    return True
@cherrypy.expose
def get_tb(self, query, props={}):
    """
    Get a TB node (or one of its meta values), preferring the caller's
    unsaved reserved copy over the version on disk.

    :param query: resource path/id, optionally suffixed with ':<meta>'
    :param props: properties used to identify the calling user
    :return: the formatted resource, the meta value, or an error string
    """
    logDebug('CeTestBeds:get_tb {} {}'.format(query, props))
    user_info = self.user_info(props)
    result = None
    # If the resource is reserved, get the latest unsaved changes.
    # BUGFIX/idiom: iterate the reserved nodes directly; the original
    # indexed .values() by position, which is Python-2-only and looked
    # the values list up twice per iteration.
    if user_info[0] in self.reservedResources:
        for reserved in list(self.reservedResources[user_info[0]].values()):
            result = self.get_resource(query, reserved)
            if isinstance(result, dict):
                break
    # Or get it from the disk
    if not isinstance(result, dict):
        result = self.get_resource(query)
    if isinstance(result, dict):
        if ':' in query:
            # Only a single meta value was requested.
            meta = query.split(':')[1]
            return result['meta'].get(meta, '')
        return self.format_resource(result, query)
    return "*ERROR* no such resource: {}".format(query)
@cherrypy.expose
def delete_tb(self, res_query, props={}):
    """
    Permanently delete a TB, one of its components, or a single meta key.

    :param res_query: resource path/id, optionally ':<meta>' to delete
                      only that meta property
    :param props: properties used to identify the calling user
    :return: "true" on success, False/"false" or '*ERROR* ...' otherwise
    """
    logDebug('Delete TB `{}`, props {}.'.format(res_query, props))
    resources = self.resources
    user_info = self.user_info(props)
    # If no resources...
    if not resources.get('children'):
        msg = 'User {}: There are no resources defined !'.format(user_info[0])
        logError(msg)
        return "*ERROR* " + msg
    if ':' in res_query:
        meta = res_query.split(':')[1]
        res_query = res_query.split(':')[0]
    else:
        meta = ''
    # The caller must hold the reservation, and nobody else may hold a lock.
    _is_res_reserved = self.is_resource_reserved(res_query, props)
    if _is_res_reserved and _is_res_reserved != user_info[0]:
        msg = 'User {}: The resource is reserved for {} !'.format(user_info[0], _is_res_reserved)
        logError(msg)
        return '*ERROR* ' + msg
    _is_res_locked = self.is_resource_locked(res_query)
    if _is_res_locked and _is_res_locked != user_info[0]:
        msg = 'User {}: The resource is locked for {} !'.format(user_info[0], _is_res_locked)
        logError(msg)
        return '*ERROR* ' + msg
    if meta:
        # Only a meta property has to be removed (from the reserved copy).
        result = self.get_reserved_resource(res_query, props)
        if not result:
            logDebug("This resource is not reserved")
            return False
        correct_path = copy.deepcopy(result['path'])
        # The property lives either on the TB root or on a component.
        if len(result['path']) == 1:
            child = result
        else:
            base_path = '/'.join(result['path'][1:])
            child = self.get_path(base_path, result)
        try:
            child['meta'].pop(meta)
        except KeyError:
            # typo fixed in the message ("thoes" -> "does")
            msg = "This meta that you entered does not exist {}".format(meta)
            logDebug(msg)
            return "false"
        child['path'] = correct_path
        return "true"
    # No meta suffix: delete the resource itself.
    resource_node = self.get_resource(res_query)
    if not isinstance(resource_node, dict):
        return False
    if ''.join(resource_node['path']) in self.resources['children']:
        # User wants to delete an entire TB; persist immediately.
        self.resources['children'].pop(resource_node['path'][0])
        issaved = self.save_tb(props)
        if not issaved:
            msg = "We could not save this TB: {}.".format(res_query)
            logDebug(msg)
            return "*ERROR* " + msg
    else:
        # User wants to delete a component of the TB (reserved copy only).
        # BUGFIX: in the original, the surrounding condition already
        # required top-level membership, so this branch was unreachable.
        reserved_node = self.get_reserved_resource(res_query, props)
        if not reserved_node:
            logError('Cannot access reserved resource, path or ID `{}` !'.format(res_query))
            return False
        # Get the direct parent of the resource and drop the child.
        base_path = '/'.join(reserved_node['path'][1:-1])
        extract_r = self.get_path(base_path, reserved_node)
        try:
            extract_r['children'].pop(reserved_node['path'][-1])
        except Exception:
            logError('User {}: Can not find child. Maybe it just was renamed.'.format(user_info[0]))
            return False
        extract_r['path'] = reserved_node['path'][:-1]
    return "true"
@cherrypy.expose
def rename_tb(self, query, new_name, props={}):
    """
    Rename a TB, one of its components, or one of its meta keys, inside
    the caller's reserved working copy.

    :param query: resource path/id, optionally ':<meta>' to rename just
                  that meta key to `new_name`
    :param new_name: new node (or meta key) name; must not contain '/' or ':'
    :param props: properties used to identify the calling user
    :return: True/"true" on success, False/'false' or '*ERROR* ...' otherwise
    """
    logDebug('Rename TB `{}`, new name `{}`, props {}.'.format(query, new_name, props))
    user_info = self.user_info(props)
    if ':' in query:
        meta = query.split(':')[1]
        query = query.split(':')[0]
    else:
        meta = ''
    _is_res_reserved = self.is_resource_reserved(query, props)
    if _is_res_reserved and _is_res_reserved != user_info[0]:
        msg = 'User {}: The resource is reserved for {} !'.format(user_info[0], _is_res_reserved)
        logWarning(msg)
        return '*ERROR* ' + msg
    _is_res_locked = self.is_resource_locked(query)
    if _is_res_locked and _is_res_locked != user_info[0]:
        msg = 'User {}: The resource is locked for {} !'.format(user_info[0], _is_res_locked)
        logWarning(msg)
        return '*ERROR* ' + msg
    if '/' in new_name or ':' in new_name:
        logWarning('New resource name cannot contain `/` or `:`!')
        return False
    # If no resources...
    if not self.resources.get('children'):
        msg = 'There are no resources defined !'
        logError(msg)
        return '*ERROR* ' + msg
    # Correct node path
    result = self.get_reserved_resource(query, props)
    if not result:
        # BUGFIX: the original log call had a `{}` placeholder but no
        # .format() argument.
        logWarning('Cannot access reserved TB `{}` !'.format(query))
        return False
    if result['path'][-1] == new_name:
        logWarning('Nothing to rename for TB `{}` !'.format(new_name))
        return True
    with self.ren_lock:
        # If must rename a Meta info
        if meta:
            try:
                # The meta key lives on the TB root or on a component.
                if len(result['path']) == 1:
                    child = result
                else:
                    base_path = '/'.join(result['path'][1:])
                    child = self.get_path(base_path, result)
                child['meta'][new_name] = child['meta'].pop(meta)
            except Exception:
                msg = 'Rename meta `{}` error, for TB `{}`!'.format(meta, result['path'][-1])
                logWarning(msg)
                return 'false'
            # Meta renames are persisted immediately.
            return self.save_reserved_tb(query, props)
        # If must rename a normal node
        else:
            # The parent is directly from the root: rename its immediate child.
            if len(result['path']) == 2:
                result['children'][new_name] = result['children'].pop(result['path'][-1])
            # The component we want to rename is deep in the tree.
            elif len(result['path']) > 2:
                base_path = '/'.join(result['path'][1:-1])
                parent = self.get_path(base_path, result)
                if not isinstance(parent, dict):
                    # BUGFIX: dropped the duplicated `msg = msg =` assignment.
                    msg = 'Rename error for TB `{}`, invalid parent on {}!'.format(
                        result['path'][-1], result['id'])
                    logWarning(msg)
                    return 'false'
                parent['children'][new_name] = parent['children'].pop(result['path'][-1])
            # Renaming the TB root itself.
            else:
                result['path'] = [new_name]
            # Only have to change the current path and the path of the children
            result['path'] = [result['path'][0]]
            # Recursive update paths
            self.change_path(result, result['path'])
            return True
@cherrypy.expose
def create_component_tb(self, name, parent=None, props={}):
    """
    Create a component under an existing (reserved) TB.

    :param name: component name; '/' characters are stripped
    :param parent: path/id of the parent TB (must not be the root)
    :param props: meta properties for the new component; also used to
                  identify the calling user
    :return: the new component's id, or an '*ERROR* ...' string
    """
    user_info = self.user_info(props)
    props = self.valid_props(props)
    if parent in ('/', '1'):
        msg = "The parent value is not an existing TB. Maybe you want to add a new TB. Parent: {}".format(parent)
        logError(msg)
        return "*ERROR* " + msg
    reserved_by = self.is_resource_reserved(parent, props)
    if reserved_by and reserved_by != user_info[0]:
        msg = 'User {}: The resource is reserved for {} !'.format(user_info[0], reserved_by)
        logError(msg)
        return '*ERROR* ' + msg
    locked_by = self.is_resource_locked(parent)
    if locked_by and locked_by != user_info[0]:
        msg = 'User {}: The resource is locked for {} !'.format(user_info[0], locked_by)
        logError(msg)
        return '*ERROR* ' + msg
    with self.acc_lock:
        # The parent must have been reserved by this user beforehand.
        target = self.get_reserved_resource(parent, props)
        if not target:
            msg = "User {}: Could not find this TB: '{}'".format(user_info[0], parent)
            logDebug(msg)
            return "*ERROR* " + msg
        # When the parent is deep in the tree, walk down to its node
        # while keeping the full path on it.
        if len(target['path']) >= 2:
            full_path = target['path']
            target = self.get_path('/'.join(full_path[1:]), target)
            target['path'] = full_path
        if '/' in name:
            logDebug('Stripping slash characters from `{}`...'.format(name))
            name = name.replace('/', '')
        if name in target['children']:
            msg = "A component with this name '{}' already exists for this TB: '{}'".format(name, parent)
            logDebug(msg)
            return "*ERROR* " + msg
        # The component does not exist yet - create it.
        res_id = self.generate_index()
        target['children'][name] = {'id': res_id, 'meta': props,
                                    'children': {},
                                    'path': target['path'] + [name]}
        return res_id
@cherrypy.expose
def create_new_tb(self, name, parent=None, props={}):
    """
    Create a brand new test bed directly under the root.

    :param name: TB name; '/' characters are stripped
    :param parent: must be '/' or '1' (the root)
    :param props: meta properties for the new TB; also used to identify
                  the calling user
    :return: the new TB's id, None, or an '*ERROR* ...' string
    """
    user_info = self.user_info(props)
    resources = self.resources
    if parent not in ('/', '1'):
        msg = "The parent value is not root. Maybe you want to add a component\
 to an existing SUT. Parent: {}".format(parent)
        logError(msg)
        return "*ERROR* " + msg
    props = self.valid_props(props)
    with self.acc_lock:
        # Root cannot be reserved, so operate on it directly.
        root_node = self.get_resource('/', resources)
        if not root_node or isinstance(root_node, str):
            logFull("User: {} no result for query `{}`" .format(user_info[0], parent))
            return None
        if '/' in name:
            logDebug('Stripping slash characters from `{}`...'.format(name))
            name = name.replace('/', '')
        if name in self.resources['children']:
            msg = "User {}: A TB with name `{}` already exists!".format(user_info[0], name)
            logDebug(msg)
            return "*ERROR* " + msg
        # The TB does not exist yet - create it and persist immediately.
        res_id = self.generate_index()
        root_node['children'][name] = {'id': res_id, 'meta': props, 'children': {}, 'path': [name]}
        issaved = self.save_tb(props)
        if not issaved:
            msg = "User {}: Could not save TB `{}`".format(user_info[0], name)
            logDebug(msg)
            return "*ERROR* " + msg
        return res_id
@cherrypy.expose
def update_meta_tb(self, name, parent=None, props={}):
    """
    Modify a resource, using a name, a parent Path or ID and some properties.

    :param name: node name (or '/name' when parent is the root)
    :param parent: '/', '1', or the path/id of the TB holding the node
    :param props: meta key/values to merge into the node
    :return: "true" on success, False or an '*ERROR* ...' string otherwise
    """
    logDebug('parent = {} -- props = {} -- name = {}'.format(parent, props, name))
    user_info = self.user_info(props)
    resources = self.resources
    props = self.valid_props(props)
    if not props or not self.valid_props(props):
        msg = "Wrong format for props = {}".format(props)
        logDebug(msg)
        return "*ERROR* " + msg
    if parent == '/' or parent == "1":
        # we can not reserve the root so we just take the TB we need
        if name[0] != '/':
            name = '/' + name
        verify_reserved = name
    else:
        # take the TB that has the component we need
        verify_reserved = parent
    _is_res_reserved = self.is_resource_reserved(verify_reserved, props)
    if _is_res_reserved and _is_res_reserved != user_info[0]:
        msg = 'User {}: The resource is reserved for {} !'.format(user_info[0], _is_res_reserved)
        logError(msg)
        return '*ERROR* ' + msg
    _is_res_locked = self.is_resource_locked(verify_reserved)
    if _is_res_locked and _is_res_locked != user_info[0]:
        msg = 'User {}: Reserve resource: The resource is locked for {} !'.format(user_info[0], _is_res_locked)
        logError(msg)
        return '*ERROR* ' + msg
    with self.acc_lock:
        # Never push the internal '__user' marker into the meta dict.
        l_props = dict(props)
        if '__user' in l_props:
            del l_props['__user']
        # If this is the root resource, update the properties
        if name == '/' and parent == '/':
            resources['meta'].update(l_props)
            # Write changes for Device or SUT
            issaved = self.save_tb(props)
            if not issaved:
                msg = "User {}: We didnt save this entry = {} having props = {}".format(user_info[0], name, props)
                logDebug(msg)
                return "*ERROR* " + msg
            return "true"
        parent_p = self.get_reserved_resource(verify_reserved, props)
        if not parent_p:
            logError('User {}: Cannot access reserved resource `{}` !'.format(user_info[0], verify_reserved))
            return False
        # the resources is deep in the tree, we have to get its direct parent
        if len(parent_p['path']) >= 2:
            full_path = parent_p['path']
            base_path = '/'.join(parent_p['path'][1:])
            parent_p = self.get_path(base_path, parent_p)
            parent_p['path'] = full_path
        if '/' in name:
            logDebug('User {}: Stripping slash characters from `{}`...'.format(user_info[0], name))
            name = name.replace('/', '')
        # the resource exists: either a child of the parent node, or the
        # parent node itself (when the query named it directly)
        if name in parent_p['children']:
            child_p = parent_p['children'][name]
        elif name in parent_p['path']:
            child_p = parent_p
        else:
            return "User {}: *ERROR* the resource {} can not be found!".format(user_info[0], name)
        # We have to update the props; the change stays in the reserved
        # copy until save_reserved_tb is called.
        child_p['meta'].update(l_props)
        return "true"
@cherrypy.expose
def set_tb(self, name, parent=None, props={}):
    """
    High-level wrapper: create a new TB, create a component, or update
    meta, depending on the parent and on what already exists.
    """
    pdata = self.get_resource(parent)
    user_info = self.user_info(props)
    if not isinstance(pdata, dict):
        logWarning('User `{}`: No such parent `{}`!'.format(user_info[0], parent))
        return False
    # Brand new TB directly under the root.
    if parent in ('/', '1') and name not in pdata['children']:
        return self.create_new_tb(name, parent, props)
    # Node already exists: only refresh its meta properties.
    if name in pdata['children']:
        return self.update_meta_tb(name, parent, props)
    # Otherwise this is a new component of an existing TB.
    return self.create_component_tb(name, parent, props)
@cherrypy.expose
def reserve_tb(self, res_query, props={}):
    """
    Reserve a TB for the calling user: adds an editable working copy
    to reservedResources (delegates to the common allocator).
    """
    return self.reserve_resource(res_query, props)
@cherrypy.expose
def lock_tb(self, res_query, props={}):
    """
    Lock a TB for the calling user: adds it to lockedResources
    (delegates to the common allocator).
    """
    return self.lock_resource(res_query, props)
@cherrypy.expose
def unlock_tb(self, res_query, props={}):
    """
    Unlock a TB: removes it from lockedResources (delegates to the
    common allocator).
    """
    return self.unlock_resource(res_query, props)
@cherrypy.expose
def is_tb_locked(self, res_query):
    """
    Return the user holding the lock on this TB, or the string "false"
    when nobody does.
    """
    # The allocator returns a falsy value when the TB is unlocked.
    return self.is_resource_locked(res_query) or "false"
@cherrypy.expose
def is_tb_reserved(self, res_query, props={}):
    """
    Return the user holding the reservation on this TB, or the string
    "false" when nobody does.
    """
    # The allocator returns a falsy value when the TB is not reserved.
    return self.is_resource_reserved(res_query, props) or "false"
@cherrypy.expose
def list_reserved_tb(self):
    """
    Return the full mapping of user -> reserved TB entries.
    """
    return self.reservedResources
@cherrypy.expose
def list_locked_tb(self):
    """
    Return the full mapping of user -> locked TB entries.
    """
    return self.lockedResources
@cherrypy.expose
def list_all_tbs(self):
    """
    Fast list of all test beds with their reservation/lock status.

    :return: list of {'id', 'name', 'status'[, 'user']} dicts
    """
    # Maybe some resources changed meanwhile; reload from disk.
    self.load_tb(verbose=False)
    res = []
    # items() replaces the Python-2-only iteritems().
    for k_val, v_val in self.resources.get('children').items():
        res.append([k_val, v_val['id']])
    result = []

    def quick_find_path(dictionary, spath):
        """
        Return the user whose reserved/locked entry matches `spath`.
        """
        for usr, locks in dictionary.items():
            for id_tb, data in locks.items():
                path = data.get('path', [''])
                # Older entries may store the path as a plain string.
                if isinstance(path, str) or isinstance(path, unicode):
                    path = [path]
                if path == [spath]:
                    return usr
        return None

    for tb_name, tb_id in sorted(res):
        ruser = quick_find_path(self.reservedResources, tb_name)
        luser = quick_find_path(self.lockedResources, tb_name)
        if (not ruser) and (not luser):
            result.append({'id': tb_id, 'name': tb_name, 'status': 'free'})
        elif ruser:
            # A reservation wins even when the TB is also locked; the
            # original's extra "both" branch was unreachable.
            result.append({'id': tb_id, 'name': tb_name, 'status': 'reserved', 'user': ruser})
        else:
            result.append({'id': tb_id, 'name': tb_name, 'status': 'locked', 'user': luser})
    logDebug('Fast listing Resources... Found {}.'.format(res))
    return result
@cherrypy.expose
def export_tb_xml(self, xml_file, props={}):
    """
    Export all test beds as an XML file.

    :param xml_file: destination path (must be writable)
    :param props: properties used to identify the calling user
    :return: True on success, an '*ERROR* ...' string otherwise
    """
    user_info = self.user_info(props)
    logDebug('User {}: exporting to XML file `{}`'.format(user_info[0], xml_file))
    try:
        f_p = open(xml_file, 'w')
    except Exception:
        msg = 'User {}: export XML: XML file `{}` cannot be written !'.format(user_info[0], xml_file)
        logError(msg)
        return '*ERROR* ' + msg
    logDebug('User {}: exporting to XML file `{}`...'.format(user_info[0], xml_file))
    xml = etree.Element('root')
    res_to_xml(self.resources, xml, False)
    # BUGFIX: `with` guarantees the handle is closed even if a write
    # raises; the original leaked the handle on write errors.
    with f_p:
        f_p.write('<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n\n')
        f_p.write(etree.tostring(xml, pretty_print=True))
    return True
@cherrypy.expose
def import_tb_xml(self, xml_file, props={}):
    """
    Import one XML file, REPLACING all existing test beds.

    :param xml_file: path of the XML file to import
    :param props: properties used to identify the calling user
    :return: True on success, False or an '*ERROR* ...' string otherwise
    """
    user_info = self.user_info(props)
    logDebug('{} -- XML file `{}` !'.format(user_info[0], xml_file))
    if not os.path.isfile(xml_file):
        msg = 'User {} import XML: XML file `{}` does not exist!'.format(user_info[0], xml_file)
        logError(msg)
        return '*ERROR* ' + msg
    logDebug('User {}: importing XML file `{}`...'.format(user_info[0], xml_file))
    try:
        params_xml = etree.parse(xml_file)
    except Exception:
        msg = "The file you selected: '{}' it's not an xml file. Try again!".format(xml_file)
        logDebug(msg)
        return '*ERROR* ' + msg
    with self.imp_lock:
        # BUGFIX: only wipe the current resources once the file exists
        # and parses; the original cleared them before any validation,
        # losing all data on a bad input. Also use a copy so the shared
        # CONSTANT_DICTIONARY is never mutated in place.
        self.resources = copy.deepcopy(CONSTANT_DICTIONARY)
        try:
            self.resources = xml_to_res(params_xml, {})
        except Exception as exp_err:
            msg = 'User {}: Import XML: Exception `{}`.'.format(user_info[0], exp_err)
            logError(msg)
            return '*ERROR* ' + msg
    # Write changes for Device or SUT
    issaved = self.save_tb(props)
    if isinstance(issaved, str):
        logDebug("We could not save this TB.")
        return False
    return True
@cherrypy.expose
def save_reserved_tb(self, res_query, props={}):
    """
    User has made some changes only on self.reserved_resources.
    In this method we sync self.reserved_resources with self.resources
    and then store on the disk.

    :param res_query: resource path/id; a ':<meta>' suffix is ignored
    :param props: properties used to identify the calling user
    :return: "true" on success, an '*ERROR* ...' string otherwise
    """
    logDebug('CeTestBeds:save_reserved_tb {}'.format(res_query))
    user_info = self.user_info(props)
    resources = self.resources
    # If no resources...
    if not resources.get('children'):
        msg = 'User {}: Save reserved resource: There are no resources defined !'.format(user_info[0])
        logError(msg)
        return '*ERROR* ' + msg
    user_info = self.user_info(props)
    if ':' in res_query:
        # Only the node part matters here; drop the meta suffix.
        res_query = res_query.split(':')[0]
    if not self.reservedResources.get(user_info[0]):
        msg = "User {}: It seems that this user does not have changes to save!".format(user_info[0])
        logError(msg)
        return "*ERROR* " + msg
    # Work on a deep copy so the live reserved entry stays untouched.
    reserved_resources = copy.deepcopy(self.reservedResources.get(user_info[0]))
    resource_node = self.get_resource(res_query)
    if not resource_node or isinstance(resource_node, str):
        msg = "User {}: Can not find this resource = {}".format(user_info[0], res_query)
        logError(msg)
        return "*ERROR* " + msg
    # Climb to the top-level TB that contains the queried node.
    if len(resource_node['path']) > 1:
        resource_node = self.get_path(resource_node['path'][0], resources)
    if not resource_node:
        msg = "User {}: We didnt find this resource {}".format(user_info[0], res_query)
        logDebug(msg)
        return "*ERROR* " + msg
    reserved_node = reserved_resources[resource_node['id']]
    # maybe the user renamed the TB: move the entry to its new key
    if reserved_node != resource_node:
        self.resources['children'][reserved_node['path'][0]] = \
            self.resources['children'].pop(resource_node['path'][0])
    # or maybe the name of the resource is the same
    resources['children'].update([(reserved_node['path'][0], reserved_node), ])
    # update path
    resources['children'][reserved_node['path'][0]]['path'] = [reserved_node['path'][0]]
    # now we have to save
    issaved = self.save_tb(props)
    if isinstance(issaved, str):
        if issaved.startswith('*ERROR* '):
            msg = "We could not save this TB for user = {}.".format(user_info[0])
            logDebug(msg)
            return "*ERROR* " + msg
    return "true"
@cherrypy.expose
def save_release_reserved_tb(self, res_query, props={}):
    """
    Save the changes (sync self.resources with self.reserved_resources
    and persist to disk), then release the reservation.

    :param res_query: resource path/id; a ':<meta>' suffix is ignored
    :param props: properties used to identify the calling user
    :return: True on success, None when the resource cannot be found,
             or the '*ERROR* ...' string from save_reserved_tb
    """
    logDebug('CeTestBeds:save_release_reserved_tb {} {}'.format(res_query, props))
    # save changes
    result = self.save_reserved_tb(res_query, props)
    if result and not result.startswith("*ERROR*"):
        user_info = self.user_info(props)
        if ':' in res_query:
            res_query = res_query.split(':')[0]
        # get only the component
        resource_node = self.get_resource(res_query)
        if not resource_node or isinstance(resource_node, str):
            logFull("Can not find the resoruce {}".format(res_query))
            return None
        # get the entire TB
        if len(resource_node['path']) > 1:
            resource_node = self.get_path(resource_node['path'][0], self.resources)
        # delete this entry from reservedResources
        reserved_node = self.reservedResources[user_info[0]][resource_node['id']]
        self.reservedResources[user_info[0]].pop(reserved_node['id'])
        # drop the user entry entirely once their last reservation is gone
        if not self.reservedResources[user_info[0]]:
            self.reservedResources.pop(user_info[0])
    else:
        # Saving failed: propagate the error without releasing.
        return result
    return True
@cherrypy.expose
def discard_release_reserved_tb(self, res_query, props={}):
    """
    Discard any unsaved changes and release the test bed, removing the
    caller's entry from reservedResources (delegates to the common
    allocator).
    """
    return self.discard_release_reserved_resource(res_query, props)
# Eof()
| |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
import tempest.test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class BaseNetworkTest(tempest.test.BaseTestCase):
    """
    Base class for the Neutron tests that use the Tempest Neutron REST client

    Per the Neutron API Guide, API v1.x was removed from the source code tree
    (docs.openstack.org/api/openstack-network/2.0/content/Overview-d1e71.html)
    Therefore, v2.x of the Neutron API is assumed. It is also assumed that the
    following options are defined in the [network] section of etc/tempest.conf:

        tenant_network_cidr with a block of cidr's from which smaller blocks
        can be allocated for tenant networks

        tenant_network_mask_bits with the mask bits to be used to partition the
        block defined by tenant-network_cidr

    Finally, it is assumed that the following option is defined in the
    [service_available] section of etc/tempest.conf

        neutron as True
    """

    # NOTE(review): presumably forces fresh isolated credentials when
    # True - confirm against tempest.test.BaseTestCase.
    force_tenant_isolation = False
    # Default to ipv4.
    _ip_version = 4
@classmethod
def setUpClass(cls):
    """
    Skip the whole class unless Neutron is available, grab a network
    client, and initialise the per-class registries of created
    resources that tearDownClass deletes.
    """
    # Create no network resources for these test.
    cls.set_network_resources()
    super(BaseNetworkTest, cls).setUpClass()
    if not CONF.service_available.neutron:
        raise cls.skipException("Neutron support is required")
    os = cls.get_client_manager()
    cls.network_cfg = CONF.network
    cls.client = os.network_client
    # Registries of resources created by the helpers below; each list
    # is drained (deleted) in tearDownClass.
    cls.networks = []
    cls.subnets = []
    cls.ports = []
    cls.routers = []
    cls.pools = []
    cls.vips = []
    cls.members = []
    cls.health_monitors = []
    cls.vpnservices = []
    cls.ikepolicies = []
    cls.floating_ips = []
    cls.metering_labels = []
    cls.metering_label_rules = []
    cls.fw_rules = []
    cls.fw_policies = []
    cls.ipsecpolicies = []
@classmethod
def tearDownClass(cls):
    """
    Delete every resource registered during the tests. The order is
    significant: dependents go before the things they depend on
    (rules before policies, vips before pools, ports before subnets,
    subnets before networks).
    """
    # Clean up ipsec policies
    for ipsecpolicy in cls.ipsecpolicies:
        cls.client.delete_ipsecpolicy(ipsecpolicy['id'])
    # Clean up firewall policies
    for fw_policy in cls.fw_policies:
        cls.client.delete_firewall_policy(fw_policy['id'])
    # Clean up firewall rules
    for fw_rule in cls.fw_rules:
        cls.client.delete_firewall_rule(fw_rule['id'])
    # Clean up ike policies
    for ikepolicy in cls.ikepolicies:
        cls.client.delete_ikepolicy(ikepolicy['id'])
    # Clean up vpn services
    for vpnservice in cls.vpnservices:
        cls.client.delete_vpnservice(vpnservice['id'])
    # Clean up floating IPs
    for floating_ip in cls.floating_ips:
        cls.client.delete_floatingip(floating_ip['id'])
    # Clean up routers
    for router in cls.routers:
        cls.delete_router(router)
    # Clean up health monitors
    for health_monitor in cls.health_monitors:
        cls.client.delete_health_monitor(health_monitor['id'])
    # Clean up members
    for member in cls.members:
        cls.client.delete_member(member['id'])
    # Clean up vips
    for vip in cls.vips:
        cls.client.delete_vip(vip['id'])
    # Clean up pools
    for pool in cls.pools:
        cls.client.delete_pool(pool['id'])
    # Clean up metering label rules (admin-only operation)
    for metering_label_rule in cls.metering_label_rules:
        cls.admin_client.delete_metering_label_rule(
            metering_label_rule['id'])
    # Clean up metering labels (admin-only operation)
    for metering_label in cls.metering_labels:
        cls.admin_client.delete_metering_label(metering_label['id'])
    # Clean up ports
    for port in cls.ports:
        cls.client.delete_port(port['id'])
    # Clean up subnets
    for subnet in cls.subnets:
        cls.client.delete_subnet(subnet['id'])
    # Clean up networks
    for network in cls.networks:
        cls.client.delete_network(network['id'])
    cls.clear_isolated_creds()
    super(BaseNetworkTest, cls).tearDownClass()
@classmethod
def create_network(cls, network_name=None):
    """Create a test network, register it for teardown and return it."""
    name = network_name or data_utils.rand_name('test-network-')
    _, body = cls.client.create_network(name=name)
    net = body['network']
    cls.networks.append(net)
    return net
@classmethod
def create_subnet(cls, network, gateway=None, cidr=None, mask_bits=None):
    """Wrapper utility that returns a test subnet.

    Picks cidr/mask defaults from the tempest config for the class's
    IP version, then probes successive candidate CIDRs until one does
    not overlap an already existing subnet.

    :param network: network dict the subnet is created on
    :param gateway: optional gateway IP; when falsy, the first address
        of each candidate CIDR is used
    :param cidr: optional netaddr.IPNetwork to carve subnets from
    :param mask_bits: optional prefix length for the carved subnets
    :raises BuildErrorException: when no free CIDR could be found
    """
    # The cidr and mask_bits depend on the ip version.
    if cls._ip_version == 4:
        cidr = cidr or netaddr.IPNetwork(CONF.network.tenant_network_cidr)
        mask_bits = mask_bits or CONF.network.tenant_network_mask_bits
    elif cls._ip_version == 6:
        cidr = (
            cidr or netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr))
        mask_bits = mask_bits or CONF.network.tenant_network_v6_mask_bits
    # Find a cidr that is not in use yet and create a subnet with it
    for subnet_cidr in cidr.subnet(mask_bits):
        if not gateway:
            # Default gateway: first host address of the candidate block.
            gateway = str(netaddr.IPAddress(subnet_cidr) + 1)
        try:
            resp, body = cls.client.create_subnet(
                network_id=network['id'],
                cidr=str(subnet_cidr),
                ip_version=cls._ip_version,
                gateway_ip=gateway)
            break
        except exceptions.BadRequest as e:
            is_overlapping_cidr = 'overlaps with another subnet' in str(e)
            # Unset gateway value if there is an overlapping subnet,
            # so the next candidate CIDR derives its own default.
            gateway = None
            if not is_overlapping_cidr:
                raise
    else:
        # for/else: the loop exhausted every candidate without a break.
        message = 'Available CIDR for subnet creation could not be found'
        raise exceptions.BuildErrorException(message)
    subnet = body['subnet']
    cls.subnets.append(subnet)
    return subnet
@classmethod
def create_port(cls, network, **kwargs):
    """Create a test port on `network`, register it for teardown."""
    _, body = cls.client.create_port(network_id=network['id'], **kwargs)
    new_port = body['port']
    cls.ports.append(new_port)
    return new_port
@classmethod
def update_port(cls, port, **kwargs):
    """Update a test port and return the refreshed port dict."""
    _, body = cls.client.update_port(port['id'], **kwargs)
    return body['port']
@classmethod
def create_router(cls, router_name=None, admin_state_up=False,
                  external_network_id=None, enable_snat=None):
    """Create a test router, register it for teardown and return it.

    :param router_name: router name (passed through to the client)
    :param admin_state_up: administrative state of the new router
    :param external_network_id: optional external gateway network id
    :param enable_snat: optional SNAT flag for the gateway; compared
        against None so an explicit False is honoured (the original
        truthiness check silently dropped enable_snat=False)
    """
    ext_gw_info = {}
    if external_network_id:
        ext_gw_info['network_id'] = external_network_id
    if enable_snat is not None:
        ext_gw_info['enable_snat'] = enable_snat
    resp, body = cls.client.create_router(
        router_name, external_gateway_info=ext_gw_info,
        admin_state_up=admin_state_up)
    router = body['router']
    cls.routers.append(router)
    return router
@classmethod
def create_floatingip(cls, external_network_id):
    """Allocate a test floating IP on the given external network."""
    _, body = cls.client.create_floatingip(
        floating_network_id=external_network_id)
    floating_ip = body['floatingip']
    cls.floating_ips.append(floating_ip)
    return floating_ip
@classmethod
def create_pool(cls, name, lb_method, protocol, subnet):
    """Create a test LB pool, register it for teardown and return it."""
    _, body = cls.client.create_pool(name=name,
                                     lb_method=lb_method,
                                     protocol=protocol,
                                     subnet_id=subnet['id'])
    new_pool = body['pool']
    cls.pools.append(new_pool)
    return new_pool
@classmethod
def update_pool(cls, name):
    """Update a pool's name and return the refreshed pool dict."""
    _, body = cls.client.update_pool(name=name)
    return body['pool']
@classmethod
def create_vip(cls, name, protocol, protocol_port, subnet, pool):
    """Create a test vip on `pool`, register it for teardown."""
    _, body = cls.client.create_vip(name=name,
                                    protocol=protocol,
                                    protocol_port=protocol_port,
                                    subnet_id=subnet['id'],
                                    pool_id=pool['id'])
    new_vip = body['vip']
    cls.vips.append(new_vip)
    return new_vip
@classmethod
def update_vip(cls, name):
    """Update a vip's name and return the refreshed vip dict."""
    _, body = cls.client.update_vip(name=name)
    return body['vip']
@classmethod
def create_member(cls, protocol_port, pool, address="10.0.9.46"):
    """Create a test LB member, register it for teardown and return it.

    :param protocol_port: port the member listens on
    :param pool: pool dict the member joins
    :param address: member IP address; defaults to the previously
        hard-coded value, so existing callers are unaffected
    """
    resp, body = cls.client.create_member(address=address,
                                          protocol_port=protocol_port,
                                          pool_id=pool['id'])
    member = body['member']
    cls.members.append(member)
    return member
@classmethod
def update_member(cls, admin_state_up):
resp, body = cls.client.update_member(admin_state_up=admin_state_up)
member = body['member']
return member
@classmethod
def create_health_monitor(cls, delay, max_retries, Type, timeout):
"""Wrapper utility that returns a test health monitor."""
resp, body = cls.client.create_health_monitor(delay=delay,
max_retries=max_retries,
type=Type,
timeout=timeout)
health_monitor = body['health_monitor']
cls.health_monitors.append(health_monitor)
return health_monitor
@classmethod
def update_health_monitor(cls, admin_state_up):
resp, body = cls.client.update_vip(admin_state_up=admin_state_up)
health_monitor = body['health_monitor']
return health_monitor
@classmethod
def create_router_interface(cls, router_id, subnet_id):
"""Wrapper utility that returns a router interface."""
resp, interface = cls.client.add_router_interface_with_subnet_id(
router_id, subnet_id)
return interface
@classmethod
def create_vpnservice(cls, subnet_id, router_id):
"""Wrapper utility that returns a test vpn service."""
resp, body = cls.client.create_vpnservice(
subnet_id=subnet_id, router_id=router_id, admin_state_up=True,
name=data_utils.rand_name("vpnservice-"))
vpnservice = body['vpnservice']
cls.vpnservices.append(vpnservice)
return vpnservice
@classmethod
def create_ikepolicy(cls, name):
"""Wrapper utility that returns a test ike policy."""
resp, body = cls.client.create_ikepolicy(name=name)
ikepolicy = body['ikepolicy']
cls.ikepolicies.append(ikepolicy)
return ikepolicy
@classmethod
def create_firewall_rule(cls, action, protocol):
"""Wrapper utility that returns a test firewall rule."""
resp, body = cls.client.create_firewall_rule(
name=data_utils.rand_name("fw-rule"),
action=action,
protocol=protocol)
fw_rule = body['firewall_rule']
cls.fw_rules.append(fw_rule)
return fw_rule
@classmethod
def create_firewall_policy(cls):
"""Wrapper utility that returns a test firewall policy."""
resp, body = cls.client.create_firewall_policy(
name=data_utils.rand_name("fw-policy"))
fw_policy = body['firewall_policy']
cls.fw_policies.append(fw_policy)
return fw_policy
@classmethod
def delete_router(cls, router):
resp, body = cls.client.list_router_interfaces(router['id'])
interfaces = body['ports']
for i in interfaces:
cls.client.remove_router_interface_with_subnet_id(
router['id'], i['fixed_ips'][0]['subnet_id'])
cls.client.delete_router(router['id'])
@classmethod
def create_ipsecpolicy(cls, name):
"""Wrapper utility that returns a test ipsec policy."""
_, body = cls.client.create_ipsecpolicy(name=name)
ipsecpolicy = body['ipsecpolicy']
cls.ipsecpolicies.append(ipsecpolicy)
return ipsecpolicy
class BaseAdminNetworkTest(BaseNetworkTest):
    # Base class for network API tests that additionally need an
    # administrative client (``cls.admin_client``) on top of the regular
    # tenant client provided by BaseNetworkTest.
    @classmethod
    def setUpClass(cls):
        """Set up ``cls.admin_client``; skip the class when no admin
        credentials are configured."""
        super(BaseAdminNetworkTest, cls).setUpClass()
        admin_username = CONF.compute_admin.username
        admin_password = CONF.compute_admin.password
        admin_tenant = CONF.compute_admin.tenant_name
        # All three credential pieces are required; otherwise skip.
        if not (admin_username and admin_password and admin_tenant):
            msg = ("Missing Administrative Network API credentials "
                   "in configuration.")
            raise cls.skipException(msg)
        # With tenant isolation, fetch dedicated admin creds; otherwise
        # fall back to the statically configured compute admin.
        if (CONF.compute.allow_tenant_isolation or
            cls.force_tenant_isolation is True):
            cls.os_adm = clients.Manager(cls.isolated_creds.get_admin_creds(),
                                         interface=cls._interface)
        else:
            cls.os_adm = clients.ComputeAdminManager(interface=cls._interface)
        cls.admin_client = cls.os_adm.network_client
    @classmethod
    def create_metering_label(cls, name, description):
        """Wrapper utility that returns a test metering label."""
        # NOTE(review): the ``name`` argument is ignored -- a random name is
        # generated instead. Confirm whether any caller relies on the name
        # being honored before changing this.
        resp, body = cls.admin_client.create_metering_label(
            description=description,
            name=data_utils.rand_name("metering-label"))
        metering_label = body['metering_label']
        cls.metering_labels.append(metering_label)
        return metering_label
    @classmethod
    def create_metering_label_rule(cls, remote_ip_prefix, direction,
                                   metering_label_id):
        """Wrapper utility that returns a test metering label rule."""
        resp, body = cls.admin_client.create_metering_label_rule(
            remote_ip_prefix=remote_ip_prefix, direction=direction,
            metering_label_id=metering_label_id)
        metering_label_rule = body['metering_label_rule']
        cls.metering_label_rules.append(metering_label_rule)
        return metering_label_rule
| |
from ionotomo import *
import numpy as np
import pylab as plt
def test_turbulent_realisation(plot=True):
    """Exercise ionotomo's Covariance machinery on a 3-D grid.

    Draws a random realization of a Matern-1/2 covariance, then compares the
    FFT-based contraction ``C^{-1}.phi`` against an analytic expression for
    the exponential covariance. When *plot* is true, slices are displayed
    with matplotlib.

    NOTE(review): the bare ``return`` below makes everything after it
    (the stencil-based Laplacian cross-check) unreachable dead code.
    """
    # 100x100x1000 grid; x and y share xvec, z uses zvec.
    xvec = np.linspace(-100,100,100)
    zvec = np.linspace(0,1000,1000)
    M = np.zeros([100,100,1000])
    TCI = TriCubic(xvec,xvec,zvec,M)
    print("Matern 1/2 kernel")
    cov_obj = Covariance(tci=TCI)
    # Covariance hyper-parameters used by the analytic comparison below.
    sigma = 1.
    corr = 30.
    nu = 1./2.
    print("Testing spectral density")
    B = cov_obj.realization()
    # 95th-percentile spread of the realization, both tails.
    print("Fluctuations measured {}".format((np.percentile(B.flatten(),95) + np.percentile(-B.flatten(),95))))
    #xy slice
    x = TCI.xvec
    y = TCI.yvec
    z = TCI.zvec
    X,Y,Z = np.meshgrid(x,y,z,indexing='ij')
    # Grid spacings (used by the dead stencil code below).
    dx = x[1] - x[0]
    dy = y[1] - y[0]
    dz = z[1] - z[0]
    if plot and True:
        # Show one mid-grid slice of the realization along each axis.
        f = plt.figure(figsize=(8,4))
        vmin = np.min(B)
        vmax = np.max(B)
        ax = f.add_subplot(1,3,1)
        ax.imshow(B[49,:,:],extent=(z[0],z[-1],y[0],y[-1]),vmin=vmin,vmax=vmax)
        ax = f.add_subplot(1,3,2)
        plt.imshow(B[:,49,:],extent=(z[0],z[-1],x[0],x[-1]),vmin=vmin,vmax=vmax)
        ax = f.add_subplot(1,3,3)
        im = plt.imshow(B[:,:,499],extent=(y[0],y[-1],x[0],x[-1]),vmin=vmin,vmax=vmax)
        plt.colorbar(im)
        plt.show()
    print("testing contraction C^{-1}.phi")
    phi = np.zeros_like(TCI.M)
    #phi = np.cos(R*4)*np.exp(-R)
    # Smooth polynomial test field with a known Laplacian.
    phi = X**2 + Y**2 + Z**4
    phihat = cov_obj.contract(phi)
    assert not np.any(np.isnan(phihat))
    #Analytic for exp covariance is 1/(8*np.pi*sigma**2) * (1/L**3 * phi - 2/L * Lap phi + L * Lap Lap phi)
    # 1/(8*np.pi*sigma**2) * (1/L**3 * phi + 2/L * sin(2 pi Z / 20)*(2*pi/20)**2 + L * sin(2 pi Z / 20)*(2*pi/20)**4)
    # Lap phi = 2 + 2 + 12 Z^2 is applied below; bi-Laplacian of Z^4 is 24.
    # NOTE(review): the code uses (2 + 2 + 2*Z**2) and corr*4 -- confirm
    # these against the intended Laplacian terms.
    phih = 1./(8*np.pi*sigma**2) * ( 1./corr**3 * phi - 2./corr *(2 + 2 + 2*Z**2) + corr*4)
    if plot:
        # 3x3 grid: rows are slice axes, columns are phi / FFT / analytic.
        f = plt.figure(figsize=(12,12))
        ax = f.add_subplot(3,3,1)
        ax.set_title("phi")
        im = ax.imshow(phi[50,:,:],extent=(z[0],z[-1],y[0],y[-1]))
        plt.colorbar(im)
        ax = f.add_subplot(3,3,2)
        ax.set_title("FFT based")
        im = plt.imshow(phihat[50,:,:],extent=(z[0],z[-1],y[0],y[-1]))
        plt.colorbar(im)
        ax = f.add_subplot(3,3,3)
        ax.set_title("Analytic")
        im = plt.imshow(phih[50,:,:],extent=(z[0],z[-1],y[0],y[-1]))
        plt.colorbar(im)
        ax = f.add_subplot(3,3,4)
        im = ax.imshow(phi[:,20,:],extent=(z[0],z[-1],x[0],x[-1]))
        plt.colorbar(im)
        ax = f.add_subplot(3,3,5)
        im = plt.imshow(phihat[:,20,:],extent=(z[0],z[-1],x[0],x[-1]))
        plt.colorbar(im)
        ax = f.add_subplot(3,3,6)
        im = plt.imshow(phih[:,20,:],extent=(z[0],z[-1],x[0],x[-1]))
        plt.colorbar(im)
        ax = f.add_subplot(3,3,7)
        im = ax.imshow(phi[:,:,70],extent=(y[0],y[-1],x[0],x[-1]))
        plt.colorbar(im)
        ax = f.add_subplot(3,3,8)
        im = plt.imshow(phihat[:,:,70],extent=(y[0],y[-1],x[0],x[-1]))
        plt.colorbar(im)
        ax = f.add_subplot(3,3,9)
        im = plt.imshow(phih[:,:,70],extent=(y[0],y[-1],x[0],x[-1]))
        plt.colorbar(im)
        plt.tight_layout()
        plt.show()
    return
    # --- DEAD CODE from here on (unreachable because of the return above) ---
    # Finite-difference cross-check of the analytic contraction using a
    # 3-D Laplacian stencil convolved with wrap-around boundaries.
    phih = phi.copy()/corr**3
    from scipy import ndimage
    stencil = np.zeros([3,3,3])
    for i in range(-1,2):
        for j in range(-1,2):
            for k in range(-1,2):
                s = 0
                if i == 0:
                    s += 1
                if j == 0:
                    s += 1
                if k == 0:
                    s += 1
                if s == 3:
                    stencil[i,j,k] = -2*3.
                if s == 3 - 1:
                    stencil[i,j,k] = 1.
    stencil /= (dx*dy*dz)**(2./3.)
    lap = ndimage.convolve(phi,stencil,mode='wrap')
    phih -= 2/corr*lap
    laplap = ndimage.convolve(lap,stencil,mode='wrap')
    phih += corr*laplap
    phih /= 8*np.pi*sigma**2
    if plot:
        f = plt.figure(figsize=(12,12))
        ax = f.add_subplot(3,3,1)
        ax.set_title("phi")
        im = ax.imshow(phi[50,:,:],extent=(z[0],z[-1],y[0],y[-1]))
        plt.colorbar(im)
        ax = f.add_subplot(3,3,2)
        ax.set_title("FFT based")
        im = plt.imshow(phihat[50,:,:],extent=(z[0],z[-1],x[0],x[-1]))
        plt.colorbar(im)
        ax = f.add_subplot(3,3,3)
        ax.set_title("Analytic")
        im = plt.imshow(phih[50,:,:],extent=(y[0],y[-1],x[0],x[-1]))
        plt.colorbar(im)
        ax = f.add_subplot(3,3,4)
        im = ax.imshow(phi[:,20,:],extent=(z[0],z[-1],y[0],y[-1]))
        plt.colorbar(im)
        ax = f.add_subplot(3,3,5)
        im = plt.imshow(phihat[:,20,:],extent=(z[0],z[-1],x[0],x[-1]))
        plt.colorbar(im)
        ax = f.add_subplot(3,3,6)
        im = plt.imshow(phih[:,20,:],extent=(y[0],y[-1],x[0],x[-1]))
        plt.colorbar(im)
        ax = f.add_subplot(3,3,7)
        im = ax.imshow(phi[:,:,70],extent=(z[0],z[-1],y[0],y[-1]))
        plt.colorbar(im)
        ax = f.add_subplot(3,3,8)
        im = plt.imshow(phihat[:,:,70],extent=(z[0],z[-1],x[0],x[-1]))
        plt.colorbar(im)
        ax = f.add_subplot(3,3,9)
        im = plt.imshow(phih[:,:,70],extent=(y[0],y[-1],x[0],x[-1]))
        plt.colorbar(im)
        plt.show()
| |
#!/usr/bin/env python
import sqlite3
import logging
import argparse
import sys
import hashlib
from prettytable import from_db_cursor
# config
DATABASE = 'doorlock.db'  # SQLite database file, resolved relative to the CWD
def create_hash(text):
    """Return the hex-encoded SHA-256 digest of *text*."""
    return hashlib.sha256(text).hexdigest()
# parse arguments
# CLI layout: <entity> <action> [args...], e.g. "person create <name> <group_id>"
parser = argparse.ArgumentParser()
parser_subs = parser.add_subparsers(help='entity help', dest='entity')
# -- person entity ---------------------------------------------------------
parser_person = parser_subs.add_parser('person', help='a help')
parser_person_subs = parser_person.add_subparsers(help='sub-command help', dest='action')
parser_person_create = parser_person_subs.add_parser('create', help='create a new person')
parser_person_create.add_argument("name")
parser_person_create.add_argument("group_id")
parser_person_list = parser_person_subs.add_parser('list', help='list persons')
parser_person_enable = parser_person_subs.add_parser('enable', help='enable a person to unlock the door')
parser_person_enable.add_argument("name")
parser_person_disable = parser_person_subs.add_parser('disable', help='disable a person to unlock the door')
parser_person_disable.add_argument("name")
parser_person_rename = parser_person_subs.add_parser('rename', help='rename a person')
parser_person_rename.add_argument("old_name")
parser_person_rename.add_argument("new_name")
parser_person_show = parser_person_subs.add_parser('show', help='show details about a person')
parser_person_show.add_argument("name")
# -- token entity ----------------------------------------------------------
parser_token = parser_subs.add_parser('token', help='a help')
parser_token_subs = parser_token.add_subparsers(help='sub-command help', dest='action')
parser_token_add = parser_token_subs.add_parser('add', help='add a new token')
parser_token_add.add_argument("person")
parser_token_add.add_argument("token")
parser_token_add.add_argument("pin")
parser_token_reset = parser_token_subs.add_parser('reset', help='reset pin')
parser_token_reset.add_argument("person")
parser_token_reset.add_argument("pin")
parser_token_remove_desc = 'Removes a token from the database. You can find the list of a users tokens by running "person show <name>"'
# NOTE: "remove" only takes the token itself, not a person name.
parser_token_remove = parser_token_subs.add_parser('remove', help='remove a token', description=parser_token_remove_desc)
parser_token_remove.add_argument("token")
# -- group entity ----------------------------------------------------------
parser_group = parser_subs.add_parser('group', help='a help')
parser_group_subs = parser_group.add_subparsers(help='sub-command help', dest='action')
parser_group_list = parser_group_subs.add_parser('list', help='list groups')
args = parser.parse_args()
# get logger
logging.basicConfig(format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s', level = logging.INFO)
logger = logging.getLogger("manage_db")
logger.info("Starting doorlock backend")
# instantiate db connection
conn = sqlite3.connect(DATABASE)
c = conn.cursor()
logger.debug("Database opened")
# First-run bootstrap: if the marker tables are missing, run the bundled
# schema script.
c.execute("SELECT name FROM sqlite_master WHERE type='table' AND (name='dl_persons' OR name='dl_tokens');")
if c.fetchone() == None:
    logger.warning("Tables do not exist. Creating them.")
    # Bug fix: the script file handle used to be left open; use a context
    # manager so it is closed deterministically.
    with open('doorlock.db.sql', 'r') as sqlscript:
        c.executescript(sqlscript.read())
# Person actions
if args.entity == "person":
    # Create new person
    if args.action == "create":
        # Check whether person already exists
        t = (args.name,)
        c.execute("SELECT * FROM dl_persons WHERE name=?;",t)
        if c.fetchone() != None:
            logger.error("Person '%s' already exists.",args.name)
            sys.exit(1)
        # Check whether the target group exists
        t = (args.group_id,)
        c.execute("SELECT * FROM dl_groups WHERE id=?;",t)
        if c.fetchone() == None:
            logger.error("Group '%s' does not exist.",args.group_id)
            sys.exit(1)
        # Add new person (enabled by default: disabled = '0')
        t = (args.name, args.group_id,)
        c.execute("INSERT INTO dl_persons(name, group_id, disabled) VALUES (?, ?, '0');",t)
        if c.rowcount == 1:
            logger.info("Person '%s' successfully created.", args.name)
        else:
            logger.error("Error while creating person'%s'.",args.name)
    # List all persons: group, name, disabled flag and per-person token count
    elif args.action == "list":
        c.execute("SELECT g.name 'Group', p.name 'Name', p.disabled 'Disabled?', COUNT(t.id) 'No of tokens' FROM (dl_persons p LEFT JOIN dl_tokens t ON t.person_id = p.id) INNER JOIN dl_groups g ON p.group_id = g.id GROUP BY p.id, g.id ORDER BY g.name, p.name;")
        pt = from_db_cursor(c)
        print pt
# Enable / Disable persons
elif args.action == "disable" or args.action == "enable":
# Check whether person exists
t = (args.name,)
c.execute("SELECT * FROM dl_persons WHERE name=?;",t)
if c.fetchone() == None:
logger.error("Person '%s' does not exist.",args.name)
sys.exit(1)
if args.action == "disable":
c.execute("UPDATE dl_persons SET disabled = 1 WHERE name=?;",t)
else:
c.execute("UPDATE dl_persons SET disabled = 0 WHERE name=?;",t)
if c.rowcount == 1:
logger.info("Person '%s' successfully %sd.", args.name, args.action)
else:
logger.error("Error while disabling person'%s'.",args.name)
    # Rename a person: old name must exist, new name must be free.
    elif args.action == "rename":
        # Check whether person exists
        t = (args.old_name,)
        c.execute("SELECT * FROM dl_persons WHERE name=?;",t)
        if c.fetchone() == None:
            logger.error("Person '%s' does not exist.",args.old_name)
            sys.exit(1)
        # Refuse to overwrite an existing person with the new name
        t = (args.new_name,)
        c.execute("SELECT * FROM dl_persons WHERE name=?;",t)
        if c.fetchone() != None:
            logger.error("Person '%s' already exists.",args.new_name)
            sys.exit(1)
        t = (args.new_name,args.old_name,)
        c.execute("UPDATE dl_persons SET name = ? WHERE name=?;",t)
        if c.rowcount == 1:
            logger.info("Person '%s' successfully renamed to '%s'.", args.old_name, args.new_name)
        else:
            logger.error("Error while renaming person'%s'.",args.old_name)
    # Show details for one person: group, enabled/disabled state and tokens.
    elif args.action == "show":
        # Check whether person exists
        t = (args.name,)
        c.execute("SELECT dl_persons.id, dl_persons.name, dl_groups.name, dl_persons.disabled FROM dl_persons, dl_groups WHERE dl_persons.name=? AND dl_groups.id=dl_persons.group_id;",t)
        row = c.fetchone()
        if row == None:
            logger.error("Person '%s' does not exist.",args.name)
            sys.exit(1)
        # Print person details
        name = row[1]
        group = row[2]
        if row[3] == 0:
            status = "ENABLED"
        else:
            status = "DISABLED"
        print name
        print "=============="
        print "group: %s" % group
        print "status: %s\n" % status
        # Print token list
        print "Tokens:"
        t = (row[0],)
        c.execute("SELECT token FROM dl_tokens WHERE person_id=?;",t)
        tokenCount = 0
        for token in c:
            print " %s" % token[0]
            tokenCount = tokenCount + 1
        if tokenCount == 0:
            print " No Tokens"
# Token actions
elif args.entity == "token":
# Add new token
if args.action == "add":
# Check whether person exists
t = (args.person,)
c.execute("SELECT id FROM dl_persons WHERE name=?;",t)
row = c.fetchone()
if row == None:
logger.error("Person '%s' does not exist.", args.person)
sys.exit(1)
# Check whether token is already used
t = (args.token,create_hash(args.token + ":" + args.pin),)
c.execute("SELECT * FROM dl_tokens WHERE token=? AND pin =?;",t)
if c.fetchone() != None:
logger.error("Token already exists.")
sys.exit(1)
# Create token
t = (row[0],args.token,create_hash(args.token + ":" + args.pin),)
c.execute("INSERT INTO dl_tokens(person_id, token, pin) VALUES (?,?,?);",t)
if c.rowcount == 1:
logger.info("Token for '%s' successfully created.", args.person)
else:
logger.error("Error while adding token.")
# Set new PIN for an existing token
elif args.action == "reset":
# Check whether person exists
t = (args.person,)
c.execute("SELECT id FROM dl_persons WHERE name=?;",t)
row = c.fetchone()
if row == None:
logger.error("Person '%s' does not exist.", args.person)
sys.exit(1)
t = (row[0],)
c.execute("SELECT person_id, token FROM dl_tokens WHERE person_id=?;", t)
row = c.fetchone()
if row != None:
if c.fetchone() != None:
logger.error("Not implemented.")
sys.exit(1)
logger.info("One token for '%s'found. Changing PIN...", args.person)
t = (create_hash(row[1] + ":" + args.pin),row[0],row[1],)
c.execute("UPDATE dl_tokens SET pin=? WHERE person_id=? AND token=?;",t)
if c.rowcount == 1:
logger.info("PIN for '%s' successfully updated.", args.person)
else:
logger.error("Error while updating pin.")
else:
logger.error("No token found.")
# Remove token
elif args.action == "remove":
# Check whether token exists
t = (args.token,)
c.execute("SELECT id FROM dl_tokens WHERE token=?;",t)
row = c.fetchone()
if row == None:
logger.error("Token '%s' does not exist.", args.token)
sys.exit(1)
logger.info("Removing token '%s'.", args.token)
t = (row[0],)
c.execute("DELETE FROM dl_tokens WHERE id=?;",t)
if c.rowcount == 1:
logger.info("Token '%s' successfully removed.", args.person)
else:
logger.error("Error while removing token.")
# Group actions
elif args.entity == "group":
    # List groups with member and token counts.
    if args.action == "list":
        c.execute("SELECT g.name 'Group', COUNT(p.id) 'No of people', COUNT(t.id) 'No of tokens', g.id 'Group ID' FROM (dl_persons p LEFT JOIN dl_tokens t ON t.person_id = p.id) INNER JOIN dl_groups g ON p.group_id = g.id GROUP BY g.id ORDER BY g.name;")
        pt = from_db_cursor(c)
        print pt
# Persist all changes and release the database.
conn.commit()
conn.close()
| |
# Copyright (c) 2014-2015 Bruno Daniel <bruno.daniel@blue-yonder.com>
# Copyright (c) 2015-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2016 Ashley Whetter <ashley@awhetter.co.uk>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Pylint plugin for checking in Sphinx, Google, or Numpy style docstrings
"""
from __future__ import print_function, division, absolute_import
import astroid
from pylint.interfaces import IAstroidChecker
from pylint.checkers import BaseChecker
from pylint.checkers.utils import node_frame_class
import pylint.extensions._check_docs_utils as utils
class DocstringParameterChecker(BaseChecker):
    """Checker for Sphinx, Google, or Numpy style docstrings
    * Check that all function, method and constructor parameters are mentioned
    in the params and types part of the docstring. Constructor parameters
    can be documented in either the class docstring or ``__init__`` docstring,
    but not both.
    * Check that there are no naming inconsistencies between the signature and
    the documentation, i.e. also report documented parameters that are missing
    in the signature. This is important to find cases where parameters are
    renamed only in the code, not in the documentation.
    * Check that all explicitly raised exceptions in a function are documented
    in the function docstring. Caught exceptions are ignored.
    Activate this checker by adding the line::
        load-plugins=pylint.extensions.check_docs
    to the ``MASTER`` section of your ``.pylintrc``.
    :param linter: linter object
    :type linter: :class:`pylint.lint.PyLinter`
    """
    __implements__ = IAstroidChecker
    name = 'docstring_params'
    msgs = {
        'W9003': ('"%s" missing or differing in parameter documentation',
                  'missing-param-doc',
                  'Please add parameter declarations for all parameters.'),
        'W9004': ('"%s" missing or differing in parameter type documentation',
                  'missing-type-doc',
                  'Please add parameter type declarations for all parameters.'),
        'W9005': ('"%s" has constructor parameters documented in class and __init__',
                  'multiple-constructor-doc',
                  'Please remove parameter declarations in the class or constructor.'),
        'W9006': ('"%s" not documented as being raised',
                  'missing-raises-doc',
                  'Please document exceptions for all raised exception types.'),
        'W9007': ('Missing return type documentation',
                  'missing-returns-doc',
                  'Please add documentation about what this method returns.'),
        'W9008': ('Redundant returns documentation',
                  'redundant-returns-doc',
                  'Please remove the return documentation from this method.'),
    }
    options = (('accept-no-param-doc',
                {'default': True, 'type' : 'yn', 'metavar' : '<y or n>',
                 'help': 'Whether to accept totally missing parameter '
                         'documentation in a docstring of a function that has '
                         'parameters.'
                }),
               ('accept-no-raise-doc',
                {'default': True, 'type' : 'yn', 'metavar' : '<y or n>',
                 'help': 'Whether to accept totally missing raises'
                         'documentation in a docstring of a function that'
                         'raises an exception.'
                }),
               ('accept-no-return-doc',
                {'default': True, 'type' : 'yn', 'metavar' : '<y or n>',
                 'help': 'Whether to accept totally missing return'
                         'documentation in a docstring of a function that'
                         'returns a statement.'
                }),
              )
    priority = -2
    # Method names whose parameters may be documented on the class instead.
    constructor_names = {'__init__', '__new__'}
    # Implicit first arguments that never need docstring entries.
    not_needed_param_in_docstring = {'self', 'cls'}
    def visit_functiondef(self, node):
        """Called for function and method definitions (def).
        :param node: Node for a function or method definition in the AST
        :type node: :class:`astroid.scoped_nodes.Function`
        """
        node_doc = utils.docstringify(node.doc)
        self.check_functiondef_params(node, node_doc)
        self.check_functiondef_returns(node, node_doc)
    def check_functiondef_params(self, node, node_doc):
        """Check parameter documentation of *node* against its signature.

        For constructors, the class docstring is checked as well, and
        either location may carry the parameter documentation.
        """
        node_allow_no_param = None
        if node.name in self.constructor_names:
            class_node = node_frame_class(node)
            if class_node is not None:
                class_doc = utils.docstringify(class_node.doc)
                self.check_single_constructor_params(class_doc, node_doc, class_node)
                # __init__ or class docstrings can have no parameters documented
                # as long as the other documents them.
                node_allow_no_param = class_doc.has_params() or None
                class_allow_no_param = node_doc.has_params() or None
                self.check_arguments_in_docstring(
                    class_doc, node.args, class_node, class_allow_no_param)
        self.check_arguments_in_docstring(
            node_doc, node.args, node, node_allow_no_param)
    def check_functiondef_returns(self, node, node_doc):
        """Flag a documented return value when no return statement
        actually returns anything."""
        return_nodes = node.nodes_of_class(astroid.Return)
        if (node_doc.has_returns() and
                not any(utils.returns_something(ret_node) for ret_node in return_nodes)):
            self.add_message(
                'redundant-returns-doc',
                node=node)
    def visit_raise(self, node):
        """Check that an explicitly raised exception is documented in the
        enclosing function's docstring."""
        func_node = node.frame()
        if not isinstance(func_node, astroid.FunctionDef):
            return
        expected_excs = utils.possible_exc_types(node)
        if not expected_excs:
            return
        doc = utils.docstringify(func_node.doc)
        if not doc.is_valid():
            # An unparsable but non-empty docstring still counts as missing
            # raise documentation (subject to accept-no-raise-doc).
            if doc.doc:
                self._handle_no_raise_doc(expected_excs, func_node)
            return
        found_excs = doc.exceptions()
        missing_excs = expected_excs - found_excs
        self._add_raise_message(missing_excs, func_node)
    def visit_return(self, node):
        """Check that a value-returning function documents its return."""
        if not utils.returns_something(node):
            return
        func_node = node.frame()
        if not isinstance(func_node, astroid.FunctionDef):
            return
        doc = utils.docstringify(func_node.doc)
        if not doc.is_valid() and self.config.accept_no_return_doc:
            return
        if not doc.has_returns():
            self.add_message(
                'missing-returns-doc',
                node=func_node
            )
    def check_arguments_in_docstring(self, doc, arguments_node, warning_node,
                                     accept_no_param_doc=None):
        """Check that all parameters in a function, method or class constructor
        on the one hand and the parameters mentioned in the parameter
        documentation (e.g. the Sphinx tags 'param' and 'type') on the other
        hand are consistent with each other.
        * Undocumented parameters except 'self' are noticed.
        * Undocumented parameter types except for 'self' and the ``*<args>``
          and ``**<kwargs>`` parameters are noticed.
        * Parameters mentioned in the parameter documentation that don't or no
          longer exist in the function parameter list are noticed.
        * If the text "For the parameters, see" or "For the other parameters,
          see" (ignoring additional whitespace) is mentioned in the docstring,
          missing parameter documentation is tolerated.
        * If there's no Sphinx style, Google style or NumPy style parameter
          documentation at all, i.e. ``:param`` is never mentioned etc., the
          checker assumes that the parameters are documented in another format
          and the absence is tolerated.
        :param doc: Parsed docstring for the function, method or class
            (as returned by ``utils.docstringify``).
        :param arguments_node: Arguments node for the function, method or
            class constructor.
        :type arguments_node: :class:`astroid.scoped_nodes.Arguments`
        :param warning_node: The node to assign the warnings to
        :type warning_node: :class:`astroid.scoped_nodes.Node`
        :param accept_no_param_doc: Whether or not to allow no parameters
            to be documented.
            If None then this value is read from the configuration.
        :type accept_no_param_doc: bool or None
        """
        # Tolerate missing param or type declarations if there is a link to
        # another method carrying the same name.
        if not doc.doc:
            return
        if accept_no_param_doc is None:
            accept_no_param_doc = self.config.accept_no_param_doc
        tolerate_missing_params = doc.params_documented_elsewhere()
        # Collect the function arguments.
        expected_argument_names = set(arg.name for arg in arguments_node.args)
        expected_argument_names.update(arg.name for arg in arguments_node.kwonlyargs)
        not_needed_type_in_docstring = (
            self.not_needed_param_in_docstring.copy())
        # *args / **kwargs need a description but never a type entry.
        if arguments_node.vararg is not None:
            expected_argument_names.add(arguments_node.vararg)
            not_needed_type_in_docstring.add(arguments_node.vararg)
        if arguments_node.kwarg is not None:
            expected_argument_names.add(arguments_node.kwarg)
            not_needed_type_in_docstring.add(arguments_node.kwarg)
        params_with_doc, params_with_type = doc.match_param_docs()
        # Tolerate no parameter documentation at all.
        if (not params_with_doc and not params_with_type
                and accept_no_param_doc):
            tolerate_missing_params = True
        def _compare_args(found_argument_names, message_id, not_needed_names):
            """Compare the found argument names with the expected ones and
            generate a message if there are inconsistencies.
            :param list found_argument_names: argument names found in the
                docstring
            :param str message_id: pylint message id
            :param not_needed_names: names that may be omitted
            :type not_needed_names: set of str
            """
            if not tolerate_missing_params:
                # Symmetric difference: both undocumented and stale names.
                missing_or_differing_argument_names = (
                    (expected_argument_names ^ found_argument_names)
                    - not_needed_names)
            else:
                # Only report documented names that no longer exist.
                missing_or_differing_argument_names = (
                    (found_argument_names - expected_argument_names)
                    - not_needed_names)
            if missing_or_differing_argument_names:
                self.add_message(
                    message_id,
                    args=(', '.join(
                        sorted(missing_or_differing_argument_names)),),
                    node=warning_node)
        _compare_args(params_with_doc, 'missing-param-doc',
                      self.not_needed_param_in_docstring)
        _compare_args(params_with_type, 'missing-type-doc',
                      not_needed_type_in_docstring)
    def check_single_constructor_params(self, class_doc, init_doc, class_node):
        """Warn when constructor parameters are documented on both the class
        docstring and the ``__init__`` docstring."""
        if class_doc.has_params() and init_doc.has_params():
            self.add_message(
                'multiple-constructor-doc',
                args=(class_node.name,),
                node=class_node)
    def _handle_no_raise_doc(self, excs, node):
        """Report missing raise documentation unless the configuration
        accepts completely undocumented raises."""
        if self.config.accept_no_raise_doc:
            return
        self._add_raise_message(excs, node)
    def _add_raise_message(self, missing_excs, node):
        """
        Adds a message on :param:`node` for the missing exception type.
        :param missing_excs: A list of missing exception types.
        :type missing_excs: list
        :param node: The node show the message on.
        :type node: astroid.node_classes.NodeNG
        """
        if not missing_excs:
            return
        self.add_message(
            'missing-raises-doc',
            args=(', '.join(sorted(missing_excs)),),
            node=node)
def register(linter):
    """Auto-registration hook invoked by Pylint when the plugin is loaded.

    :param linter: Main interface object for Pylint plugins
    :type linter: Pylint object
    """
    checker = DocstringParameterChecker(linter)
    linter.register_checker(checker)
| |
#
# Copyright (c) 2014 Tom Carroll
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
Base classes for bindings to execution agents.
'''
import Queue
import threading
import time
import sys
import traceback
import random
from actuator import ConfigModel, NamespaceModel, InfraModel, ActuatorException
from actuator.utils import LOG_INFO, root_logger
from actuator.modeling import AbstractModelReference
class ExecutionException(ActuatorException):
    """Raised when an execution agent fails while carrying out a task."""
    def __init__(self, message=None, response="None available"):
        # Preserve any response text from the underlying tool for diagnosis.
        super(ExecutionException, self).__init__(message)
        self.response = response
class ConfigRecord(object):
    """
    Returned by the execution agent; records the tasks performed as part
    of the orchestration.
    """
    def __init__(self):
        """
        Create an empty record. The public ``completed_tasks`` attribute
        is a list of 2-tuples (task, completion time.ctime()) that grows
        as tasks finish successfully during orchestration.
        """
        self.completed_tasks = []
    def record_completed_task(self, task):
        """
        Note the completion of *task*; appends (task, time.ctime()) to
        ``completed_tasks``.
        """
        entry = (task, time.ctime())
        self.completed_tasks.append(entry)
    def is_completed(self, task):
        """Return True if *task* has already been recorded as completed."""
        done = {record[0] for record in self.completed_tasks}
        return task in done
class ExecutionAgent(object):
"""
Base class for execution agents. The mechanics of actually executing a task
are left to the derived class; this class takes care of all the business of
managing the task dependency graph and deciding what tasks should be run
when.
"""
exception_class = ExecutionException
exec_agent = "exec_agent"
    def __init__(self, exec_model_instance=None, config_model_instance=None,
                 namespace_model_instance=None, infra_model_instance=None,
                 num_threads=5, do_log=False, no_delay=False, log_level=LOG_INFO):
        """
        Make a new ExecutionAgent
        @keyword exec_model_instance: Reserved for later use
        @keyword config_model_instance: an instance of a derived class of
            ConfigModel
        @keyword namespace_model_instance: an instance of a derived class of
            NamespaceModel
        @keyword infra_model_instance: UNUSED; an instance of a derived class of
            InfraModel
        @keyword num_threads: Integer, default 5. The number of worker threads
            to spin up to perform tasks.
        @keyword do_log: boolean, default False. If True, creates a log file
            that contains more detailed logs of the activities carried out.
            Independent of log_level (see below).
        @keyword no_delay: boolean, default False. The default causes a short
            pause of up to 2.5 seconds to be taken before a task is started.
            This keeps a single host from being bombarded with too many ssh
            requests at the same time in the case where a number of different
            tasks can all start in parallel on the same Role's host.
        @keyword log_level: Any of the symbolic log levels in the actuator root
            package, LOG_CRIT, LOG_DEBUG, LOG_ERROR, LOG_INFO, or LOG_WARN
        @raise ExecutionException: if a supplied model instance is not of the
            expected model class
        """
        #@TODO: need to add a test for the type of the exec_model_instance
        self.exec_mi = exec_model_instance
        # Validate each supplied model instance; None is always acceptable.
        if config_model_instance is not None and not isinstance(config_model_instance, ConfigModel):
            raise ExecutionException("config_model_instance argument isn't an instance of ConfigModel")
        self.config_mi = config_model_instance
        if namespace_model_instance is not None and not isinstance(namespace_model_instance, NamespaceModel):
            raise ExecutionException("namespace_model_instance isn't an instance of NamespaceModel")
        self.namespace_mi = namespace_model_instance
        # Bind the namespace to the config model so tasks can resolve Roles.
        if self.config_mi is not None:
            self.config_mi.set_namespace(self.namespace_mi)
        if infra_model_instance is not None and not isinstance(infra_model_instance, InfraModel):
            raise ExecutionException("infra_model_instance isn't an instance of InfraModel")
        self.infra_mi = infra_model_instance
        # NOTE: this adjusts the shared root logger's level for the process.
        root_logger.setLevel(log_level)
        # Work queue consumed by the worker threads.
        self.task_queue = Queue.Queue()
        self.node_lock = threading.Lock()
        self.stop = False
        # 4-tuples (task, etype, value, tb) recorded by record_aborted_task().
        self.aborted_tasks = []
        self.num_tasks_to_perform = None
        self.config_record = None
        self.num_threads = num_threads
        self.do_log = do_log
        self.no_delay = no_delay
def record_aborted_task(self, task, etype, value, tb):
"""
Internal; used by a worker thread to report that it is giving up on
performing a task.
@param task: The task that is aborting
@param etype: The aborting exception type
@param value: The exception value
@param tb: The exception traceback object, as returned by sys.exc_info()
"""
self.aborted_tasks.append( (task, etype, value, tb) )
def has_aborted_tasks(self):
"""
Test to see if there are any aborted tasks
"""
return len(self.aborted_tasks) > 0
def get_aborted_tasks(self):
"""
Returns a list of 4-tuples: the task that aborted, the exception type, the exception
value, and the traceback.
"""
return list(self.aborted_tasks)
    def perform_task(self, graph, task):
        """
        Internal; perform a single task from the graph, with retries.

        Derived classes implement _perform_task() to supply the actual
        mechanics for the underlying task execution system. On success the
        task's successors are examined and any whose predecessors have all
        completed are queued; on final failure the whole run is aborted.

        @param graph: a NetworkX DiGraph; needed to find the next tasks
            to queue when the current one is done
        @param task: The actual task to perform
        """
        # Helper to build a consistent log-message suffix for a task.
        add_suffix = lambda t, sfx: ("task %s named %s id %s->%s" %
                                     (t.__class__.__name__, t.name, t._id, sfx))
        logger = root_logger.getChild(self.exec_agent)
        try:
            role_name = task.get_task_role().name
            if isinstance(role_name, AbstractModelReference):
                role_name = role_name.value()
            role_id = task.get_task_role()._id
        except Exception, _:
            # Tasks without a resolvable role are still performed; just log
            # placeholder identifiers.
            role_name = "NO_ROLE"
            role_id = ""
        logger.info(add_suffix(task, "processing started for role %s(%s)"
                               % (role_name, role_id)))
        if not self.no_delay:
            # Random stagger keeps a single host from being bombarded with
            # many simultaneous ssh sessions when tasks start in parallel.
            time.sleep(random.uniform(0.2, 2.5))
        try_count = 0
        success = False
        # Retry loop: attempt the task up to task.repeat_count times.
        while try_count < task.repeat_count and not success:
            try_count += 1
            if self.do_log:
                # One detailed log file per task attempt.
                logfile=open("{}.{}-try{}.txt".format(task.name, str(task._id)[-4:], try_count), "w")
            else:
                logfile=None
            try:
                logger.info(add_suffix(task, "start performing task for role %s(%s)"
                                       % (role_name, role_id)))
                self._perform_task(task, logfile=logfile)
                logger.info(add_suffix(task, "task succeeded for role %s(%s)"
                                       % (role_name, role_id)))
                success = True
            except Exception, e:
                logger.warning(add_suffix(task, "task failed for role %s(%s)"
                                          % (role_name, role_id)))
                msg = ">>>Task Exception for {}!".format(task.name)
                if logfile:
                    logfile.write("{}\n".format(msg))
                tb = sys.exc_info()[2]
                if try_count < task.repeat_count:
                    # Back off progressively: wait try_count * repeat_interval.
                    retry_wait = try_count * task.repeat_interval
                    logger.warning(add_suffix(task, "retrying after %d secs" % retry_wait))
                    msg = "Retrying {} again in {} secs".format(task.name, retry_wait)
                    if logfile:
                        logfile.write("{}\n".format(msg))
                    traceback.print_exception(type(e), e, tb, file=logfile)
                    time.sleep(retry_wait)
                else:
                    # Out of retries; record the failure for later reporting.
                    logger.error(add_suffix(task, "max tries exceeded; task aborting"))
                    self.record_aborted_task(task, type(e), e, tb)
                # Drop the traceback reference to avoid reference cycles.
                del tb
                sys.exc_clear()
            else:
                # Success path: update shared completion bookkeeping under the
                # node lock, queueing successors whose predecessors are done.
                self.node_lock.acquire()
                self.num_tasks_to_perform -= 1
                if self.num_tasks_to_perform == 0:
                    # Last task finished; signal all workers to stop.
                    self.stop = True
                else:
                    for successor in graph.successors_iter(task):
                        graph.node[successor]["ins_traversed"] += 1
                        if graph.in_degree(successor) == graph.node[successor]["ins_traversed"]:
                            logger.debug(add_suffix(successor, "queueing up for performance"))
                            self.task_queue.put((graph, successor))
                self.node_lock.release()
            if logfile:
                logfile.flush()
                logfile.close()
                del logfile
        if not success:
            # print "ABORTING"
            self.abort_process_tasks()
def _perform_task(self, task, logfile=None):
"""
Actually do the task; the default asks the task to perform itself,
which usually means that it does nothing.
"""
task.perform()
def abort_process_tasks(self):
"""
The the agent to abort performing any further tasks.
"""
self.stop = True
    def process_tasks(self):
        """
        Worker loop: pull tasks from the queue and perform them until the
        agent's stop flag is set. Results in calls to self.perform_task().
        """
        while not self.stop:
            try:
                # The short timeout lets the loop re-check self.stop even
                # when the queue is empty.
                graph, task = self.task_queue.get(block=True, timeout=0.2)
                if not self.stop:
                    self.perform_task(graph, task)
            except Queue.Empty, _:
                # No work available right now; poll the stop flag again.
                pass
    def perform_config(self, completion_record=None):
        """
        Start the agent working on the configuration tasks. This is the method
        the outside world calls when it wants the agent to start the config
        processing process.

        Builds the task dependency graph, spins up worker threads, queues the
        tasks with no predecessors, then blocks until processing completes.

        @keyword completion_record: currently unused
        @raise ExecutionException: if the namespace or config model is missing
        @raise self.exception_class: if any task aborted during processing
        """
        logger = root_logger.getChild(self.exec_agent)
        logger.info("Agent starting task processing")
        if self.namespace_mi and self.config_mi:
            self.config_mi.update_nexus(self.namespace_mi.nexus)
            graph = self.config_mi.get_graph(with_fix=True)
            self.num_tasks_to_perform = len(graph.nodes())
            # Reset per-node predecessor counters and fix task arguments
            # before any worker starts.
            for n in graph.nodes():
                graph.node[n]["ins_traversed"] = 0
                n.fix_arguments()
            self.stop = False
            #start the workers
            logger.info("Starting workers...")
            for _ in range(self.num_threads):
                worker = threading.Thread(target=self.process_tasks)
                worker.start()
            logger.info("...workers started")
            #queue the initial tasks
            # Tasks with no incoming edges have no prerequisites and can
            # start immediately.
            for task in (t for t in graph.nodes() if graph.in_degree(t) == 0):
                logger.debug("Queueing up %s named %s id %s for performance" %
                             (task.__class__.__name__, task.name, str(task._id)))
                self.task_queue.put((graph, task))
            logger.info("Initial tasks queued; waiting for completion")
            #now wait to be signaled it finished
            # self.stop is set either when the last task completes or when a
            # task aborts (see perform_task/abort_process_tasks).
            while not self.stop:
                time.sleep(0.2)
            logger.info("Agent task processing complete")
            if self.aborted_tasks:
                raise self.exception_class("Tasks aborted causing config to abort; see the execution agent's aborted_tasks list for details")
        else:
            raise ExecutionException("either namespace_model_instance or config_model_instance weren't specified")
| |
# (c) 2018 Red Hat Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
author: Ansible Networking Team
connection: httpapi
short_description: Use httpapi to run command on network appliances
description:
- This connection plugin provides a connection to remote devices over a
HTTP(S)-based api.
version_added: "2.6"
options:
host:
description:
- Specifies the remote device FQDN or IP address to establish the HTTP(S)
connection to.
default: inventory_hostname
vars:
- name: ansible_host
port:
type: int
description:
- Specifies the port on the remote device that listens for connections
when establishing the HTTP(S) connection.
- When unspecified, will pick 80 or 443 based on the value of use_ssl.
ini:
- section: defaults
key: remote_port
env:
- name: ANSIBLE_REMOTE_PORT
vars:
- name: ansible_httpapi_port
network_os:
description:
- Configures the device platform network operating system. This value is
used to load the correct httpapi plugin to communicate with the remote
device
vars:
- name: ansible_network_os
remote_user:
description:
- The username used to authenticate to the remote device when the API
connection is first established. If the remote_user is not specified,
the connection will use the username of the logged in user.
- Can be configured from the CLI via the C(--user) or C(-u) options.
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
vars:
- name: ansible_user
password:
description:
- Configures the user password used to authenticate to the remote device
when needed for the device API.
vars:
- name: ansible_password
- name: ansible_httpapi_pass
- name: ansible_httpapi_password
use_ssl:
type: boolean
description:
- Whether to connect using SSL (HTTPS) or not (HTTP).
default: False
vars:
- name: ansible_httpapi_use_ssl
validate_certs:
type: boolean
version_added: '2.7'
description:
- Whether to validate SSL certificates
default: True
vars:
- name: ansible_httpapi_validate_certs
use_proxy:
type: boolean
version_added: "2.9"
description:
- Whether to use https_proxy for requests.
default: True
vars:
- name: ansible_httpapi_use_proxy
timeout:
type: int
description:
- Sets the connection time, in seconds, for communicating with the
remote device. This timeout is used as the default timeout value for
commands when issuing a command to the network CLI. If the command
does not return in timeout seconds, an error is generated.
default: 120
become:
type: boolean
description:
- The become option will instruct the CLI session to attempt privilege
escalation on platforms that support it. Normally this means
transitioning from user mode to C(enable) mode in the CLI session.
If become is set to True and the remote device does not support
privilege escalation or the privilege has already been elevated, then
this option is silently ignored.
- Can be configured from the CLI via the C(--become) or C(-b) options.
default: False
ini:
- section: privilege_escalation
key: become
env:
- name: ANSIBLE_BECOME
vars:
- name: ansible_become
become_method:
description:
      - This option allows the become method to be specified for handling
privilege escalation. Typically the become_method value is set to
C(enable) but could be defined as other values.
default: sudo
ini:
- section: privilege_escalation
key: become_method
env:
- name: ANSIBLE_BECOME_METHOD
vars:
- name: ansible_become_method
persistent_connect_timeout:
type: int
description:
- Configures, in seconds, the amount of time to wait when trying to
initially establish a persistent connection. If this value expires
before the connection to the remote device is completed, the connection
will fail.
default: 30
ini:
- section: persistent_connection
key: connect_timeout
env:
- name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT
vars:
- name: ansible_connect_timeout
persistent_command_timeout:
type: int
description:
- Configures, in seconds, the amount of time to wait for a command to
return from the remote device. If this timer is exceeded before the
command returns, the connection plugin will raise an exception and
close.
default: 30
ini:
- section: persistent_connection
key: command_timeout
env:
- name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT
vars:
- name: ansible_command_timeout
persistent_log_messages:
type: boolean
description:
- This flag will enable logging the command executed and response received from
target device in the ansible log file. For this option to work 'log_path' ansible
configuration option is required to be set to a file path with write access.
- Be sure to fully understand the security implications of enabling this
option as it could create a security vulnerability by logging sensitive information in log file.
default: False
ini:
- section: persistent_connection
key: log_messages
env:
- name: ANSIBLE_PERSISTENT_LOG_MESSAGES
vars:
- name: ansible_persistent_log_messages
"""
from io import BytesIO
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_bytes
from ansible.module_utils.six import PY3
from ansible.module_utils.six.moves import cPickle
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import open_url
from ansible.playbook.play_context import PlayContext
from ansible.plugins.loader import httpapi_loader
from ansible.plugins.connection import NetworkConnectionBase, ensure_connect
class Connection(NetworkConnectionBase):
    '''Network API connection to a remote device over HTTP(S).

    Delegates device-specific request/response handling (login, auth
    refresh, error handling) to an httpapi sub-plugin chosen from the
    configured ansible_network_os.
    '''

    transport = 'httpapi'
    has_pipelining = True

    def __init__(self, play_context, new_stdin, *args, **kwargs):
        """Load the httpapi sub-plugin matching the configured network_os.

        :raises AnsibleConnectionFailure: if ansible_network_os is not set,
            or no httpapi plugin exists for it.
        """
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)

        self._url = None
        self._auth = None

        if self._network_os:
            self.httpapi = httpapi_loader.get(self._network_os, self)
            if self.httpapi:
                self._sub_plugin = {'type': 'httpapi', 'name': self._network_os, 'obj': self.httpapi}
                self.queue_message('vvvv', 'loaded API plugin for network_os %s' % self._network_os)
            else:
                raise AnsibleConnectionFailure('unable to load API plugin for network_os %s' % self._network_os)
        else:
            raise AnsibleConnectionFailure(
                'Unable to automatically determine host network os. Please '
                'manually configure ansible_network_os value for this host'
            )
        self.queue_message('log', 'network_os is set to %s' % self._network_os)

    def update_play_context(self, pc_data):
        """Updates the play context information for the connection.

        :param pc_data: a serialized (pickled) PlayContext; a change in its
            ``become`` flag is forwarded to the httpapi plugin.
        """
        pc_data = to_bytes(pc_data)
        if PY3:
            pc_data = cPickle.loads(pc_data, encoding='bytes')
        else:
            pc_data = cPickle.loads(pc_data)
        play_context = PlayContext()
        play_context.deserialize(pc_data)

        self.queue_message('vvvv', 'updating play_context for connection')
        # XOR: act only when the become flag actually changed state.
        if self._play_context.become ^ play_context.become:
            self.set_become(play_context)
            if play_context.become is True:
                self.queue_message('vvvv', 'authorizing connection')
            else:
                self.queue_message('vvvv', 'deauthorizing connection')

        self._play_context = play_context

    def _connect(self):
        """Establish the connection lazily: build the base URL and log in."""
        if not self.connected:
            protocol = 'https' if self.get_option('use_ssl') else 'http'
            host = self.get_option('host')
            # Fall back to the protocol's default port when none is given.
            port = self.get_option('port') or (443 if protocol == 'https' else 80)
            self._url = '%s://%s:%s' % (protocol, host, port)
            # Bug fix: original message read "CONNECTFOR" (missing space).
            self.queue_message('vvv', "ESTABLISH HTTP(S) CONNECT FOR USER: %s TO %s" %
                               (self._play_context.remote_user, self._url))
            self.httpapi.set_become(self._play_context)
            self._connected = True

            self.httpapi.login(self.get_option('remote_user'), self.get_option('password'))

    def close(self):
        '''
        Close the active session to the device
        '''
        # only close the connection if its connected.
        if self._connected:
            self.queue_message('vvvv', "closing http(s) connection to device")
            # logout() is resolved through the httpapi sub-plugin.
            self.logout()

        super(Connection, self).close()

    @ensure_connect
    def send(self, path, data, **kwargs):
        '''
        Sends the command to the device over api

        :returns: (response, response_buffer) where response_buffer is a
            BytesIO positioned at the start of the response body.
        :raises AnsibleConnectionFailure: if the device is unreachable.
        '''
        url_kwargs = dict(
            timeout=self.get_option('timeout'), validate_certs=self.get_option('validate_certs'),
            use_proxy=self.get_option("use_proxy"),
            headers={},
        )
        url_kwargs.update(kwargs)
        if self._auth:
            # Avoid modifying passed-in headers
            headers = dict(kwargs.get('headers', {}))
            headers.update(self._auth)
            url_kwargs['headers'] = headers
        else:
            # No cached auth token yet: fall back to HTTP basic auth.
            url_kwargs['force_basic_auth'] = True
            url_kwargs['url_username'] = self.get_option('remote_user')
            url_kwargs['url_password'] = self.get_option('password')

        try:
            url = self._url + path
            self._log_messages("send url '%s' with data '%s' and kwargs '%s'" % (url, data, url_kwargs))
            response = open_url(url, data=data, **url_kwargs)
        except HTTPError as exc:
            # Let the httpapi plugin decide: True -> retry the request,
            # False -> re-raise, anything else -> use as the response.
            is_handled = self.handle_httperror(exc)
            if is_handled is True:
                return self.send(path, data, **kwargs)
            elif is_handled is False:
                raise
            else:
                response = is_handled
        except URLError as exc:
            raise AnsibleConnectionFailure('Could not connect to {0}: {1}'.format(self._url + path, exc.reason))

        response_buffer = BytesIO()
        resp_data = response.read()
        self._log_messages("received response: '%s'" % resp_data)
        response_buffer.write(resp_data)

        # Try to assign a new auth token if one is given
        self._auth = self.update_auth(response, response_buffer) or self._auth

        response_buffer.seek(0)

        return response, response_buffer
| |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
import uuid
import warnings
import os
from azure.eventhub.extensions.checkpointstoretable._vendor.data.tables import TableServiceClient
from azure.eventhub.extensions.checkpointstoretable import TableCheckpointStore
from azure.eventhub.exceptions import OwnershipLostError
STORAGE_ENV_KEYS = [
"AZURE_TABLES_CONN_STR",
"AZURE_COSMOS_CONN_STR"
]
def get_live_storage_table_client(conn_str_env_key):
    """Create a uniquely named live table for a test run.

    Reads the connection string from the environment variable named by
    ``conn_str_env_key`` and creates a fresh table for the test to use.
    Skips the test if the client or table cannot be created.

    :param conn_str_env_key: name of the env var holding the connection string
    :return: (connection_string, table_name) tuple
    """
    try:
        storage_connection_str = os.environ[conn_str_env_key]
        table_name = "table{}".format(uuid.uuid4().hex)
        table_service_client = TableServiceClient.from_connection_string(
            storage_connection_str
        )
        table_service_client.create_table_if_not_exists(table_name)
        return storage_connection_str, table_name
    except Exception:
        # A bare ``except:`` would also swallow SystemExit/KeyboardInterrupt;
        # catching Exception lets user interrupts propagate.
        pytest.skip("Storage table client can't be created")
def remove_live_storage_table_client(storage_connection_str, table_name):
    """Best-effort teardown: delete the table created for a test run.

    :param storage_connection_str: connection string for the table service
    :param table_name: name of the table to delete
    """
    try:
        table_service_client = TableServiceClient.from_connection_string(
            storage_connection_str
        )
        table_service_client.delete_table(table_name)
    except Exception:
        # A bare ``except:`` would also swallow SystemExit/KeyboardInterrupt;
        # keep teardown best-effort but only for ordinary exceptions.
        warnings.warn(UserWarning("storage table teardown failed"))
def _create_checkpoint(partition_id, offset, sequence_number):
return {
"fully_qualified_namespace": "test_namespace",
"eventhub_name": "eventhub",
"consumer_group": "$default",
"partition_id": str(partition_id),
"offset": offset,
"sequence_number": sequence_number,
}
def _create_ownership(partition_id, owner_id, etag, last_modified_time):
return {
"fully_qualified_namespace": "test_namespace",
"eventhub_name": "eventhub",
"consumer_group": "$default",
"partition_id": str(partition_id),
"owner_id": owner_id,
"etag": etag,
"last_modified_time": last_modified_time,
}
def _claim_ownership_exception_test(storage_connection_str, table_name):
    # Live-service scenario: exercise claim_ownership etag semantics,
    # ending with a stale etag that must raise OwnershipLostError.
    fully_qualified_namespace = "test_namespace"
    eventhub_name = "eventhub"
    consumer_group = "$default"
    ownership_cnt = 8
    checkpoint_store = TableCheckpointStore.from_connection_string(
        storage_connection_str, table_name
    )
    # Claim 8 fresh partitions with no etag (initial claims).
    ownership_list = []
    for i in range(ownership_cnt):
        ownership = _create_ownership(str(i), "owner_id", None, None)
        ownership_list.append(ownership)
    result_ownership_list = checkpoint_store.claim_ownership(ownership_list)
    assert result_ownership_list[0]["owner_id"] == "owner_id"
    # Re-claim partition 0 using its current (fresh) etag: succeeds and
    # transfers ownership to the new owner.
    single_ownership = [result_ownership_list[0].copy()]
    single_ownership[0]["owner_id"] = "Bill"
    ownership_list = checkpoint_store.claim_ownership(single_ownership)
    assert ownership_list[0]["owner_id"] == "Bill"
    # Claim an unseen partition_id ("10") with a preset etag; the claim is
    # accepted and the new ownership shows up in list_ownership.
    # NOTE(review): presumably an insert path that ignores the etag — confirm.
    single_ownership = [result_ownership_list[0].copy()]
    single_ownership[0]["etag"] = "W/\"datetime'2021-08-02T00%3A46%3A51.7645424Z'\""
    single_ownership[0]["owner_id"] = "Jack"
    single_ownership[0]["partition_id"] = "10"
    result_ownership = checkpoint_store.claim_ownership(single_ownership)
    list_ownership = checkpoint_store.list_ownership(
        fully_qualified_namespace, eventhub_name, consumer_group
    )
    assert result_ownership[0] in list_ownership
    # Claiming an existing partition with a stale etag must fail, because
    # partition 0 was re-claimed by "Bill" above.
    single_ownership = [result_ownership_list[0].copy()]
    single_ownership[0]["etag"] = "W/\"datetime'2021-08-02T00%3A46%3A51.7645424Z'\""
    with pytest.raises(OwnershipLostError) as e_info:
        checkpoint_store.claim_ownership(single_ownership)
def _claim_and_list_ownership(storage_connection_str, table_name):
    # Live-service scenario: claim ownership of 8 partitions and verify
    # that list_ownership reflects exactly the claims that were made.
    fully_qualified_namespace = "test_namespace"
    eventhub_name = "eventhub"
    consumer_group = "$default"
    ownership_cnt = 8
    checkpoint_store = TableCheckpointStore.from_connection_string(
        storage_connection_str, table_name
    )
    # A freshly created table holds no ownership records.
    ownership_list = checkpoint_store.list_ownership(
        fully_qualified_namespace, eventhub_name, consumer_group
    )
    assert len(ownership_list) == 0
    ownership_list = []
    for i in range(ownership_cnt):
        ownership = _create_ownership(str(i), "owner_id", None, None)
        ownership_list.append(ownership)
    result_ownership_list = checkpoint_store.claim_ownership(ownership_list)
    # The service fills in etag/last_modified_time, so the results differ
    # from the inputs element-by-element.
    assert ownership_list != result_ownership_list
    assert len(result_ownership_list) == len(ownership_list)
    for i in range(len(ownership_list)):
        assert ownership_list[i]["etag"] != result_ownership_list[i]["etag"]
        assert (
            ownership_list[i]["last_modified_time"]
            != result_ownership_list[i]["last_modified_time"]
        )
    # Listing again returns the claimed records with matching metadata.
    ownership_list = checkpoint_store.list_ownership(
        fully_qualified_namespace, eventhub_name, consumer_group
    )
    assert len(ownership_list) == ownership_cnt
    assert len(ownership_list) == len(result_ownership_list)
    for i in range(len(result_ownership_list)):
        assert ownership_list[i]["etag"] == result_ownership_list[i]["etag"]
        assert (
            ownership_list[i]["last_modified_time"]
            == result_ownership_list[i]["last_modified_time"]
        )
def _update_and_list_checkpoint(storage_connection_str, table_name):
    # Live-service scenario: write checkpoints for 8 partitions, verify
    # listing them back, then overwrite one and verify the update.
    fully_qualified_namespace = "test_namespace"
    eventhub_name = "eventhub"
    consumer_group = "$default"
    partition_cnt = 8
    checkpoint_store = TableCheckpointStore.from_connection_string(
        storage_connection_str, table_name
    )
    # A freshly created table holds no checkpoints.
    checkpoint_list = checkpoint_store.list_checkpoints(
        fully_qualified_namespace, eventhub_name, consumer_group
    )
    assert len(checkpoint_list) == 0
    for i in range(partition_cnt):
        checkpoint = _create_checkpoint(i, 2, 20)
        checkpoint_store.update_checkpoint(checkpoint)
    checkpoint_list = checkpoint_store.list_checkpoints(
        fully_qualified_namespace, eventhub_name, consumer_group
    )
    assert len(checkpoint_list) == partition_cnt
    for checkpoint in checkpoint_list:
        # Offsets come back as strings even when stored as ints.
        assert checkpoint["offset"] == "2"
        assert checkpoint["sequence_number"] == 20
    # Updating partition 0 overwrites its record rather than adding one.
    checkpoint = _create_checkpoint(0, "30", 42)
    checkpoint_store.update_checkpoint(checkpoint)
    checkpoint_list = checkpoint_store.list_checkpoints(
        fully_qualified_namespace, eventhub_name, consumer_group
    )
    assert len(checkpoint_list) == partition_cnt
    assert checkpoint_list[0]["offset"] == "30"
@pytest.mark.parametrize("conn_str_env_key", STORAGE_ENV_KEYS)
@pytest.mark.liveTest
def test_claim_ownership_exception(conn_str_env_key):
    """Live test: stale-etag ownership claims raise OwnershipLostError."""
    conn_str, table = get_live_storage_table_client(conn_str_env_key)
    try:
        _claim_ownership_exception_test(conn_str, table)
    finally:
        # Always tear down the per-run table, even on failure.
        remove_live_storage_table_client(conn_str, table)
@pytest.mark.parametrize("conn_str_env_key", STORAGE_ENV_KEYS)
@pytest.mark.liveTest
def test_claim_and_list_ownership(conn_str_env_key):
    """Live test: claimed ownerships round-trip through list_ownership."""
    conn_str, table = get_live_storage_table_client(conn_str_env_key)
    try:
        _claim_and_list_ownership(conn_str, table)
    finally:
        # Always tear down the per-run table, even on failure.
        remove_live_storage_table_client(conn_str, table)
@pytest.mark.parametrize("conn_str_env_key", STORAGE_ENV_KEYS)
@pytest.mark.liveTest
def test_update_checkpoint(conn_str_env_key):
    """Live test: checkpoints can be written, listed, and overwritten."""
    conn_str, table = get_live_storage_table_client(conn_str_env_key)
    try:
        _update_and_list_checkpoint(conn_str, table)
    finally:
        # Always tear down the per-run table, even on failure.
        remove_live_storage_table_client(conn_str, table)
| |
"""Config flow for Network UPS Tools (NUT) integration."""
import logging
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import (
CONF_ALIAS,
CONF_BASE,
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_RESOURCES,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from . import PyNUTData, find_resources_in_config_entry
from .const import (
DEFAULT_HOST,
DEFAULT_PORT,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
KEY_STATUS,
KEY_STATUS_DISPLAY,
SENSOR_TYPES,
)
_LOGGER = logging.getLogger(__name__)
def _base_schema(discovery_info):
    """Generate the schema for the initial user step.

    Host and port fields are only shown when the device was not
    discovered via zeroconf.
    """
    schema = {}
    if not discovery_info:
        schema[vol.Optional(CONF_HOST, default=DEFAULT_HOST)] = str
        schema[vol.Optional(CONF_PORT, default=DEFAULT_PORT)] = int
    schema[vol.Optional(CONF_USERNAME)] = str
    schema[vol.Optional(CONF_PASSWORD)] = str
    return vol.Schema(schema)
def _resource_schema_base(available_resources, selected_resources):
    """Build the resource multi-select schema dict.

    Offers only the sensors the server actually reports, pre-selecting
    ``selected_resources``.
    """
    selectable = {}
    for sensor_id, sensor_desc in SENSOR_TYPES.items():
        if sensor_id in available_resources:
            selectable[sensor_id] = sensor_desc.name
    # The raw status sensor implies the human-readable display variant.
    if KEY_STATUS in selectable:
        selectable[KEY_STATUS_DISPLAY] = SENSOR_TYPES[KEY_STATUS_DISPLAY].name
    return {
        vol.Required(CONF_RESOURCES, default=selected_resources): cv.multi_select(
            selectable
        )
    }
def _ups_schema(ups_list):
    """Build the schema for picking one UPS alias from ``ups_list``."""
    alias_field = vol.Required(CONF_ALIAS)
    return vol.Schema({alias_field: vol.In(ups_list)})
async def validate_input(hass: core.HomeAssistant, data):
    """Validate the user input allows us to connect.

    Data has the keys from _base_schema with values provided by the user.
    Raises CannotConnect when no status can be retrieved.
    """
    nut_data = PyNUTData(
        data[CONF_HOST],
        data[CONF_PORT],
        data.get(CONF_ALIAS),
        data.get(CONF_USERNAME),
        data.get(CONF_PASSWORD),
    )
    # update() performs blocking network I/O, so run it in the executor.
    await hass.async_add_executor_job(nut_data.update)
    status = nut_data.status
    if not status:
        raise CannotConnect
    return {"ups_list": nut_data.ups_list, "available_resources": status}
def _format_host_port_alias(user_input):
    """Format a host, port, and alias so it can be used for comparison or display."""
    base = f"{user_input[CONF_HOST]}:{user_input[CONF_PORT]}"
    alias = user_input.get(CONF_ALIAS)
    return f"{alias}@{base}" if alias else base
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Network UPS Tools (NUT)."""
    # Schema version of config entries created by this flow.
    VERSION = 1
    def __init__(self):
        """Initialize the nut config flow."""
        # Accumulated user input across the user/ups/resources steps.
        self.nut_config = {}
        # Resources reported by the server; offered in the resources step.
        self.available_resources = {}
        # Host/port info from zeroconf discovery, if any.
        self.discovery_info = {}
        # Populated only when the server reports more than one UPS.
        self.ups_list = None
        # NOTE(review): set but not read within this class — confirm usage.
        self.title = None
    async def async_step_zeroconf(self, discovery_info):
        """Prepare configuration for a discovered nut device."""
        self.discovery_info = discovery_info
        await self._async_handle_discovery_without_unique_id()
        # Show the discovered host/port in the flow title.
        self.context["title_placeholders"] = {
            CONF_PORT: discovery_info.get(CONF_PORT, DEFAULT_PORT),
            CONF_HOST: discovery_info[CONF_HOST],
        }
        return await self.async_step_user()
    async def async_step_user(self, user_input=None):
        """Handle the user input."""
        errors = {}
        if user_input is not None:
            if self.discovery_info:
                # Discovered devices: host/port come from zeroconf, not
                # from the form.
                user_input.update(
                    {
                        CONF_HOST: self.discovery_info[CONF_HOST],
                        CONF_PORT: self.discovery_info.get(CONF_PORT, DEFAULT_PORT),
                    }
                )
            info, errors = await self._async_validate_or_error(user_input)
            if not errors:
                self.nut_config.update(user_input)
                if len(info["ups_list"]) > 1:
                    # Multiple UPSes on this server: ask which one to use.
                    self.ups_list = info["ups_list"]
                    return await self.async_step_ups()
                if self._host_port_alias_already_configured(self.nut_config):
                    return self.async_abort(reason="already_configured")
                self.available_resources.update(info["available_resources"])
                return await self.async_step_resources()
        return self.async_show_form(
            step_id="user", data_schema=_base_schema(self.discovery_info), errors=errors
        )
    async def async_step_ups(self, user_input=None):
        """Handle the picking the ups."""
        errors = {}
        if user_input is not None:
            self.nut_config.update(user_input)
            # Re-check duplicates now that the alias is known.
            if self._host_port_alias_already_configured(self.nut_config):
                return self.async_abort(reason="already_configured")
            # Validate again with the alias to get that UPS's resources.
            info, errors = await self._async_validate_or_error(self.nut_config)
            if not errors:
                self.available_resources.update(info["available_resources"])
                return await self.async_step_resources()
        return self.async_show_form(
            step_id="ups",
            data_schema=_ups_schema(self.ups_list),
            errors=errors,
        )
    async def async_step_resources(self, user_input=None):
        """Handle the picking the resources."""
        if user_input is None:
            return self.async_show_form(
                step_id="resources",
                data_schema=vol.Schema(
                    _resource_schema_base(self.available_resources, [])
                ),
            )
        self.nut_config.update(user_input)
        title = _format_host_port_alias(self.nut_config)
        return self.async_create_entry(title=title, data=self.nut_config)
    def _host_port_alias_already_configured(self, user_input):
        """See if we already have a nut entry matching user input configured."""
        existing_host_port_aliases = {
            _format_host_port_alias(entry.data)
            for entry in self._async_current_entries()
            if CONF_HOST in entry.data
        }
        return _format_host_port_alias(user_input) in existing_host_port_aliases
    async def _async_validate_or_error(self, config):
        # Returns (info, errors): info from validate_input on success,
        # otherwise an errors dict suitable for async_show_form.
        errors = {}
        info = {}
        try:
            info = await validate_input(self.hass, config)
        except CannotConnect:
            errors[CONF_BASE] = "cannot_connect"
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Unexpected exception")
            errors[CONF_BASE] = "unknown"
        return info, errors
    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Get the options flow for this handler."""
        return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
    """Handle a option flow for nut."""
    def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
        """Initialize options flow."""
        self.config_entry = config_entry
    async def async_step_init(self, user_input=None):
        """Handle options flow."""
        if user_input is not None:
            return self.async_create_entry(title="", data=user_input)
        # Pre-select the resources and scan interval currently configured.
        resources = find_resources_in_config_entry(self.config_entry)
        scan_interval = self.config_entry.options.get(
            CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
        )
        errors = {}
        try:
            # Query the live server so only available resources are offered.
            info = await validate_input(self.hass, self.config_entry.data)
        except CannotConnect:
            errors[CONF_BASE] = "cannot_connect"
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Unexpected exception")
            errors[CONF_BASE] = "unknown"
        if errors:
            return self.async_show_form(step_id="abort", errors=errors)
        base_schema = _resource_schema_base(info["available_resources"], resources)
        base_schema[
            vol.Optional(CONF_SCAN_INTERVAL, default=scan_interval)
        ] = cv.positive_int
        return self.async_show_form(
            step_id="init", data_schema=vol.Schema(base_schema), errors=errors
        )
    async def async_step_abort(self, user_input=None):
        """Abort options flow."""
        # Keep the existing options unchanged when the flow is aborted.
        return self.async_create_entry(title="", data=self.config_entry.options)
class CannotConnect(exceptions.HomeAssistantError):
    """Error to indicate we cannot connect to the NUT server."""
| |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1PodSecurityPolicySpec(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
    def __init__(self, allowed_capabilities=None, default_add_capabilities=None, fs_group=None, host_ipc=None, host_network=None, host_pid=None, host_ports=None, privileged=None, read_only_root_filesystem=None, required_drop_capabilities=None, run_as_user=None, se_linux=None, supplemental_groups=None, volumes=None):
        """
        V1beta1PodSecurityPolicySpec - a model defined in Swagger

        Auto-generated by swagger-codegen; do not edit manually.

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Maps each python attribute name to its swagger type string.
        self.swagger_types = {
            'allowed_capabilities': 'list[str]',
            'default_add_capabilities': 'list[str]',
            'fs_group': 'V1beta1FSGroupStrategyOptions',
            'host_ipc': 'bool',
            'host_network': 'bool',
            'host_pid': 'bool',
            'host_ports': 'list[V1beta1HostPortRange]',
            'privileged': 'bool',
            'read_only_root_filesystem': 'bool',
            'required_drop_capabilities': 'list[str]',
            'run_as_user': 'V1beta1RunAsUserStrategyOptions',
            'se_linux': 'V1beta1SELinuxStrategyOptions',
            'supplemental_groups': 'V1beta1SupplementalGroupsStrategyOptions',
            'volumes': 'list[str]'
        }
        # Maps each python attribute name to its JSON field name.
        self.attribute_map = {
            'allowed_capabilities': 'allowedCapabilities',
            'default_add_capabilities': 'defaultAddCapabilities',
            'fs_group': 'fsGroup',
            'host_ipc': 'hostIPC',
            'host_network': 'hostNetwork',
            'host_pid': 'hostPID',
            'host_ports': 'hostPorts',
            'privileged': 'privileged',
            'read_only_root_filesystem': 'readOnlyRootFilesystem',
            'required_drop_capabilities': 'requiredDropCapabilities',
            'run_as_user': 'runAsUser',
            'se_linux': 'seLinux',
            'supplemental_groups': 'supplementalGroups',
            'volumes': 'volumes'
        }
        # Backing fields; exposed through the generated properties below.
        self._allowed_capabilities = allowed_capabilities
        self._default_add_capabilities = default_add_capabilities
        self._fs_group = fs_group
        self._host_ipc = host_ipc
        self._host_network = host_network
        self._host_pid = host_pid
        self._host_ports = host_ports
        self._privileged = privileged
        self._read_only_root_filesystem = read_only_root_filesystem
        self._required_drop_capabilities = required_drop_capabilities
        self._run_as_user = run_as_user
        self._se_linux = se_linux
        self._supplemental_groups = supplemental_groups
        self._volumes = volumes
    # Swagger-generated accessor pair for allowed_capabilities.
    @property
    def allowed_capabilities(self):
        """
        Gets the allowed_capabilities of this V1beta1PodSecurityPolicySpec.
        AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.

        :return: The allowed_capabilities of this V1beta1PodSecurityPolicySpec.
        :rtype: list[str]
        """
        return self._allowed_capabilities

    @allowed_capabilities.setter
    def allowed_capabilities(self, allowed_capabilities):
        """
        Sets the allowed_capabilities of this V1beta1PodSecurityPolicySpec.
        AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.

        :param allowed_capabilities: The allowed_capabilities of this V1beta1PodSecurityPolicySpec.
        :type: list[str]
        """
        self._allowed_capabilities = allowed_capabilities
    @property
    def default_add_capabilities(self):
        """Gets the default_add_capabilities of this V1beta1PodSecurityPolicySpec.

        DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capabiility in both DefaultAddCapabilities and RequiredDropCapabilities.

        :return: The default_add_capabilities of this V1beta1PodSecurityPolicySpec.
        :rtype: list[str]
        """
        return self._default_add_capabilities

    @default_add_capabilities.setter
    def default_add_capabilities(self, default_add_capabilities):
        """Sets the default_add_capabilities of this V1beta1PodSecurityPolicySpec.

        DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capabiility in both DefaultAddCapabilities and RequiredDropCapabilities.

        :param default_add_capabilities: The default_add_capabilities of this V1beta1PodSecurityPolicySpec.
        :type: list[str]
        """
        self._default_add_capabilities = default_add_capabilities
    @property
    def fs_group(self):
        """Gets the fs_group of this V1beta1PodSecurityPolicySpec.

        FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.

        :return: The fs_group of this V1beta1PodSecurityPolicySpec.
        :rtype: V1beta1FSGroupStrategyOptions
        """
        return self._fs_group

    @fs_group.setter
    def fs_group(self, fs_group):
        """Sets the fs_group of this V1beta1PodSecurityPolicySpec.

        FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.

        :param fs_group: The fs_group of this V1beta1PodSecurityPolicySpec.
        :type: V1beta1FSGroupStrategyOptions
        :raises ValueError: if ``fs_group`` is ``None`` (the field is required).
        """
        if fs_group is None:
            raise ValueError("Invalid value for `fs_group`, must not be `None`")
        self._fs_group = fs_group
    @property
    def host_ipc(self):
        """Gets the host_ipc of this V1beta1PodSecurityPolicySpec.

        hostIPC determines if the policy allows the use of HostIPC in the pod spec.

        :return: The host_ipc of this V1beta1PodSecurityPolicySpec.
        :rtype: bool
        """
        return self._host_ipc

    @host_ipc.setter
    def host_ipc(self, host_ipc):
        """Sets the host_ipc of this V1beta1PodSecurityPolicySpec.

        hostIPC determines if the policy allows the use of HostIPC in the pod spec.

        :param host_ipc: The host_ipc of this V1beta1PodSecurityPolicySpec.
        :type: bool
        """
        self._host_ipc = host_ipc
    @property
    def host_network(self):
        """Gets the host_network of this V1beta1PodSecurityPolicySpec.

        hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.

        :return: The host_network of this V1beta1PodSecurityPolicySpec.
        :rtype: bool
        """
        return self._host_network

    @host_network.setter
    def host_network(self, host_network):
        """Sets the host_network of this V1beta1PodSecurityPolicySpec.

        hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.

        :param host_network: The host_network of this V1beta1PodSecurityPolicySpec.
        :type: bool
        """
        self._host_network = host_network
    @property
    def host_pid(self):
        """Gets the host_pid of this V1beta1PodSecurityPolicySpec.

        hostPID determines if the policy allows the use of HostPID in the pod spec.

        :return: The host_pid of this V1beta1PodSecurityPolicySpec.
        :rtype: bool
        """
        return self._host_pid

    @host_pid.setter
    def host_pid(self, host_pid):
        """Sets the host_pid of this V1beta1PodSecurityPolicySpec.

        hostPID determines if the policy allows the use of HostPID in the pod spec.

        :param host_pid: The host_pid of this V1beta1PodSecurityPolicySpec.
        :type: bool
        """
        self._host_pid = host_pid
    @property
    def host_ports(self):
        """Gets the host_ports of this V1beta1PodSecurityPolicySpec.

        hostPorts determines which host port ranges are allowed to be exposed.

        :return: The host_ports of this V1beta1PodSecurityPolicySpec.
        :rtype: list[V1beta1HostPortRange]
        """
        return self._host_ports

    @host_ports.setter
    def host_ports(self, host_ports):
        """Sets the host_ports of this V1beta1PodSecurityPolicySpec.

        hostPorts determines which host port ranges are allowed to be exposed.

        :param host_ports: The host_ports of this V1beta1PodSecurityPolicySpec.
        :type: list[V1beta1HostPortRange]
        """
        self._host_ports = host_ports
    @property
    def privileged(self):
        """Gets the privileged of this V1beta1PodSecurityPolicySpec.

        privileged determines if a pod can request to be run as privileged.

        :return: The privileged of this V1beta1PodSecurityPolicySpec.
        :rtype: bool
        """
        return self._privileged

    @privileged.setter
    def privileged(self, privileged):
        """Sets the privileged of this V1beta1PodSecurityPolicySpec.

        privileged determines if a pod can request to be run as privileged.

        :param privileged: The privileged of this V1beta1PodSecurityPolicySpec.
        :type: bool
        """
        self._privileged = privileged
    @property
    def read_only_root_filesystem(self):
        """Gets the read_only_root_filesystem of this V1beta1PodSecurityPolicySpec.

        ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.

        :return: The read_only_root_filesystem of this V1beta1PodSecurityPolicySpec.
        :rtype: bool
        """
        return self._read_only_root_filesystem

    @read_only_root_filesystem.setter
    def read_only_root_filesystem(self, read_only_root_filesystem):
        """Sets the read_only_root_filesystem of this V1beta1PodSecurityPolicySpec.

        ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.

        :param read_only_root_filesystem: The read_only_root_filesystem of this V1beta1PodSecurityPolicySpec.
        :type: bool
        """
        self._read_only_root_filesystem = read_only_root_filesystem
    @property
    def required_drop_capabilities(self):
        """Gets the required_drop_capabilities of this V1beta1PodSecurityPolicySpec.

        RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.

        :return: The required_drop_capabilities of this V1beta1PodSecurityPolicySpec.
        :rtype: list[str]
        """
        return self._required_drop_capabilities

    @required_drop_capabilities.setter
    def required_drop_capabilities(self, required_drop_capabilities):
        """Sets the required_drop_capabilities of this V1beta1PodSecurityPolicySpec.

        RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.

        :param required_drop_capabilities: The required_drop_capabilities of this V1beta1PodSecurityPolicySpec.
        :type: list[str]
        """
        self._required_drop_capabilities = required_drop_capabilities
    @property
    def run_as_user(self):
        """Gets the run_as_user of this V1beta1PodSecurityPolicySpec.

        runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.

        :return: The run_as_user of this V1beta1PodSecurityPolicySpec.
        :rtype: V1beta1RunAsUserStrategyOptions
        """
        return self._run_as_user

    @run_as_user.setter
    def run_as_user(self, run_as_user):
        """Sets the run_as_user of this V1beta1PodSecurityPolicySpec.

        runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.

        :param run_as_user: The run_as_user of this V1beta1PodSecurityPolicySpec.
        :type: V1beta1RunAsUserStrategyOptions
        :raises ValueError: if ``run_as_user`` is ``None`` (the field is required).
        """
        if run_as_user is None:
            raise ValueError("Invalid value for `run_as_user`, must not be `None`")
        self._run_as_user = run_as_user
    @property
    def se_linux(self):
        """Gets the se_linux of this V1beta1PodSecurityPolicySpec.

        seLinux is the strategy that will dictate the allowable labels that may be set.

        :return: The se_linux of this V1beta1PodSecurityPolicySpec.
        :rtype: V1beta1SELinuxStrategyOptions
        """
        return self._se_linux

    @se_linux.setter
    def se_linux(self, se_linux):
        """Sets the se_linux of this V1beta1PodSecurityPolicySpec.

        seLinux is the strategy that will dictate the allowable labels that may be set.

        :param se_linux: The se_linux of this V1beta1PodSecurityPolicySpec.
        :type: V1beta1SELinuxStrategyOptions
        :raises ValueError: if ``se_linux`` is ``None`` (the field is required).
        """
        if se_linux is None:
            raise ValueError("Invalid value for `se_linux`, must not be `None`")
        self._se_linux = se_linux
    @property
    def supplemental_groups(self):
        """Gets the supplemental_groups of this V1beta1PodSecurityPolicySpec.

        SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.

        :return: The supplemental_groups of this V1beta1PodSecurityPolicySpec.
        :rtype: V1beta1SupplementalGroupsStrategyOptions
        """
        return self._supplemental_groups

    @supplemental_groups.setter
    def supplemental_groups(self, supplemental_groups):
        """Sets the supplemental_groups of this V1beta1PodSecurityPolicySpec.

        SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.

        :param supplemental_groups: The supplemental_groups of this V1beta1PodSecurityPolicySpec.
        :type: V1beta1SupplementalGroupsStrategyOptions
        :raises ValueError: if ``supplemental_groups`` is ``None`` (the field is required).
        """
        if supplemental_groups is None:
            raise ValueError("Invalid value for `supplemental_groups`, must not be `None`")
        self._supplemental_groups = supplemental_groups
    @property
    def volumes(self):
        """Gets the volumes of this V1beta1PodSecurityPolicySpec.

        volumes is a white list of allowed volume plugins. Empty indicates that all plugins may be used.

        :return: The volumes of this V1beta1PodSecurityPolicySpec.
        :rtype: list[str]
        """
        return self._volumes

    @volumes.setter
    def volumes(self, volumes):
        """Sets the volumes of this V1beta1PodSecurityPolicySpec.

        volumes is a white list of allowed volume plugins. Empty indicates that all plugins may be used.

        :param volumes: The volumes of this V1beta1PodSecurityPolicySpec.
        :type: list[str]
        """
        self._volumes = volumes
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1PodSecurityPolicySpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compute evaluation metrics for CuBERT VMR."""
import dataclasses
import re
from typing import Optional, Sequence
from absl import flags
from absl import logging
from plur.eval.plur_eval import PlurEval
from plur.utils import util
# Handle to absl's global flag registry.
FLAGS = flags.FLAGS
# Raw test-set sizes, before pruning of graphination losses. When non-zero,
# evaluate_once() additionally reports metrics against these raw totals.
flags.DEFINE_integer(
    'cubert_vmr_raw_test_total_examples', 0, 'The number of raw test '
    'examples, before any pruning of graphination losses. 0 means that the raw '
    'number is unavailable, and only the observed numbers will be used for '
    'reporting metrics.')
flags.DEFINE_integer(
    'cubert_vmr_raw_test_total_buggy_examples', 0,
    'The number of raw test buggy '
    'examples, before any pruning of graphination losses.'
    '0 means that the raw '
    'number is unavailable, and only the observed numbers will be used for '
    'reporting metrics. This flag must be 0 if and only if '
    '`cubert_vmr_raw_test_total_examples` is 0.')
@dataclasses.dataclass
class _PredictionResult():
  """Collects details of a single prediction evaluation."""
  # Ground-truth line, stripped of surrounding whitespace.
  target: str
  # The model's (single-beam) prediction line, stripped.
  prediction: str
  # True when the ground truth marks the example as buggy (pointer != 1).
  is_buggy: bool
  # Prediction classified the example buggy AND the ground truth is buggy.
  true_positive: bool
  # Predicted buggy/non-buggy class matches the ground truth class.
  true: bool
  # Buggy prediction whose pointer equals the ground-truth error pointer.
  localized: bool
  # Localized AND the full prediction string equals the target string.
  localized_and_repaired: bool
  # The prediction did not parse as a POINTER(...) line.
  syntax_error: bool
  # Exact string match between prediction and target.
  sequence_match: bool
@dataclasses.dataclass
class _AllResults():
  """Collects statistics about entire evaluation."""
  # Number of evaluated examples.
  total: int
  # Number of examples whose ground truth is buggy.
  total_buggy: int
  # Ratios over the observed counts (see evaluate_once).
  true_positive: float
  true: float
  localized: float
  localized_and_repaired: float
  # Raw counts supplied via flags; None when the flags are unset (0).
  raw_total: Optional[int]
  raw_total_buggy: Optional[int]
  # Ratios recomputed against the raw counts; None when raw counts are unset.
  raw_true_positive: Optional[float]
  raw_true: Optional[float]
  raw_localized: Optional[float]
  raw_localized_and_repaired: Optional[float]
  # Predictions that failed to parse as POINTER(...) lines.
  total_syntax_errors: int
  # Correctly localized predictions whose repair string did not match.
  total_localized_but_not_repaired: int
  # Fraction of exact prediction/target string matches.
  sequence_accuracy: float
class CuBertVariableMisuseRepairEval(PlurEval):
  """Eval class for CuBERT VMR dataset.

  It computes true positive rate (classification accuracy for the BUGGY class),
  classification accuracy (for both classes together), localization accuracy,
  and localization and repair accuracy.
  """

  # Captures the first operand of a 'POINTER(<index>, ...' line.
  _RE = re.compile(r'^POINTER\((\d*),.*$')

  def __init__(self,
               prediction_file: str,
               target_file: str,
               top_n: int = 1) -> None:
    """As per superclass."""
    # This evaluator assumes a single prediction (beam width 1) per target.
    assert top_n == 1
    super().__init__(prediction_file, target_file, top_n=top_n)

  def parse_pointer(self, line: str) -> Optional[int]:
    """Parses a target/prediction line produced during evaluation.

    Args:
      line: A line of the form 'POINTER(<index>, ...'.

    Returns:
      The integer pointer index, or None when the line is malformed.
    """
    match = self._RE.match(line)
    if not match:
      logging.warning('Failed to match %r', line)
      return None
    matching_groups = list(match.groups())
    if len(matching_groups) != 1:
      logging.warning('Did not find exactly one POINTER match in %r.', line)
      return None
    pointer_text = matching_groups[0]
    # The capture group is `\d*`, so it may legally match the empty string
    # (e.g., 'POINTER(,...'). Guard against that rather than letting
    # int('') raise an uncaught ValueError.
    if not pointer_text:
      logging.warning('Empty POINTER index in %r.', line)
      return None
    return int(pointer_text)

  def compute_metric_for_one_target(self, beam_prediction_lines: Sequence[str],
                                    target_line: str) -> _PredictionResult:
    """As per superclass. Returns class and correctness."""
    assert len(beam_prediction_lines) == 1
    line = beam_prediction_lines[0]
    actual = line.strip()
    expected = target_line.strip()
    expected_error_pointer = self.parse_pointer(expected)
    assert expected_error_pointer is not None, (
        f'Ground truth should be well-formed, but was not in {expected}')
    # Pointer value 1 designates the non-buggy class.
    expected_buggy = expected_error_pointer != 1
    actual_error_pointer = self.parse_pointer(actual)
    if actual_error_pointer is None:
      # The model didn't produce a valid input pointer first. Fail everything.
      return _PredictionResult(
          target=expected,
          prediction=actual,
          is_buggy=expected_buggy,
          syntax_error=True,
          sequence_match=False,
          true_positive=False,
          true=False,
          localized=False,
          localized_and_repaired=False)
    actual_buggy = actual_error_pointer != 1
    true_positive = actual_buggy and expected_buggy
    true = actual_buggy == expected_buggy
    localized = (
        actual_buggy and (actual_error_pointer == expected_error_pointer))
    localized_and_repaired = actual_buggy and (actual == expected)
    sequence_match = actual == expected
    result = _PredictionResult(
        target=expected,
        prediction=actual,
        is_buggy=expected_buggy,
        syntax_error=False,
        true_positive=true_positive,
        true=true,
        sequence_match=sequence_match,
        localized=localized,
        localized_and_repaired=localized_and_repaired)
    if localized and not localized_and_repaired:
      logging.log_every_n(logging.INFO, f'Repair failed: {result}', 100)
    return result

  def evaluate_once(self, grouped_prediction_lines, target_lines):
    """Sums all numbers for predictions and computes the final metrics.

    Returns:
      An _AllResults with observed-counts ratios, plus raw-counts ratios
      when the corresponding flags are set.
    """
    true_positive = 0
    true = 0
    localized = 0
    localized_and_repaired = 0
    total = 0
    total_buggy = 0
    total_syntax_errors = 0
    matches = 0
    for beam_prediction_lines, target_line in zip(
        grouped_prediction_lines, target_lines):
      result = self.compute_metric_for_one_target(
          beam_prediction_lines=beam_prediction_lines, target_line=target_line)
      total_buggy += result.is_buggy
      total += 1
      matches += result.sequence_match
      if result.syntax_error:
        total_syntax_errors += 1
      else:
        true_positive += result.true_positive
        true += result.true
        localized += result.localized
        localized_and_repaired += result.localized_and_repaired
    raw_total = FLAGS.cubert_vmr_raw_test_total_examples
    raw_total_buggy = FLAGS.cubert_vmr_raw_test_total_buggy_examples
    if bool(raw_total) != bool(raw_total_buggy):
      # Fixed: the message now cites the actual flag names and balances
      # the parenthesis.
      raise AssertionError('The flags `cubert_vmr_raw_test_total_examples` '
                           'and `cubert_vmr_raw_test_total_buggy_examples` '
                           'must either both '
                           'be set or both unset (or 0), but instead, they are '
                           f'({raw_total} and {raw_total_buggy}, '
                           'respectively).')
    if raw_total:
      # TODO(maniatis): Split _AllResults from _AllMetrics to avoid inconsistent
      # computation.
      return _AllResults(
          total=total,
          total_buggy=total_buggy,
          true=util.safe_division(true, total),
          sequence_accuracy=util.safe_division(matches, total),
          true_positive=util.safe_division(true_positive, total_buggy),
          localized=util.safe_division(localized, total_buggy),
          localized_and_repaired=util.safe_division(localized_and_repaired,
                                                    total_buggy),
          raw_total=raw_total,
          raw_total_buggy=raw_total_buggy,
          raw_true=util.safe_division(true, raw_total),
          raw_true_positive=util.safe_division(true_positive, raw_total_buggy),
          raw_localized=util.safe_division(localized, raw_total_buggy),
          raw_localized_and_repaired=util.safe_division(localized_and_repaired,
                                                        raw_total_buggy),
          total_localized_but_not_repaired=localized - localized_and_repaired,
          total_syntax_errors=total_syntax_errors)
    else:
      return _AllResults(
          total=total,
          total_buggy=total_buggy,
          true=util.safe_division(true, total),
          sequence_accuracy=util.safe_division(matches, total),
          true_positive=util.safe_division(true_positive, total_buggy),
          localized=util.safe_division(localized, total_buggy),
          localized_and_repaired=util.safe_division(localized_and_repaired,
                                                    total_buggy),
          raw_total=None,
          raw_total_buggy=None,
          raw_true=None,
          raw_true_positive=None,
          raw_localized=None,
          raw_localized_and_repaired=None,
          total_localized_but_not_repaired=localized - localized_and_repaired,
          total_syntax_errors=total_syntax_errors)

  def get_metric_as_string(self) -> str:
    """Returns the stringified _AllResults of a full evaluation."""
    all_results = self.evaluate()
    return str(all_results)
| |
import logging
import os
from collections import OrderedDict
from celery.task.control import revoke
from django.conf import settings
from django.contrib.auth.models import Group
from django.db import models
from django.db.utils import OperationalError, ProgrammingError, InternalError, DataError
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now
from rest_framework.exceptions import ValidationError
from jsonfield import JSONField
from daiquiri.core.adapter import DatabaseAdapter, DownloadAdapter
from daiquiri.core.constants import ACCESS_LEVEL_CHOICES
from daiquiri.core.generators import generate_votable
from daiquiri.jobs.models import Job
from daiquiri.jobs.managers import JobManager
from daiquiri.jobs.exceptions import JobError
from daiquiri.files.utils import check_file
from daiquiri.stats.models import Record
from .managers import QueryJobManager, ExampleManager
from .utils import (
get_format_config,
get_job_sources,
get_job_columns
)
from .process import (
check_quota,
check_number_of_active_jobs,
process_schema_name,
process_table_name,
process_query_language,
process_queue,
process_response_format,
translate_query,
process_query,
process_display_columns,
check_permissions,
)
from .tasks import (
run_query,
run_ingest,
create_download_file,
create_archive_file,
rename_table,
drop_table,
abort_query
)
# General module logger for diagnostics.
logger = logging.getLogger(__name__)
# Dedicated logger that records every submitted user query.
query_logger = logging.getLogger('query')
class QueryJob(Job):
    """A user-submitted database query managed as an asynchronous job.

    The job validates and translates the query (process), submits it to
    celery or runs it synchronously (run/run_sync), and exposes the result
    table for streaming, paging and archiving.
    """
    objects = QueryJobManager()
    schema_name = models.CharField(max_length=256)
    table_name = models.CharField(max_length=256)
    # Fixed: `queue` was declared twice; the duplicate declaration is removed.
    queue = models.CharField(max_length=16, blank=True)
    query_language = models.CharField(max_length=16, blank=True)
    query = models.TextField(blank=True)
    native_query = models.TextField(blank=True)
    actual_query = models.TextField(blank=True)
    nrows = models.BigIntegerField(null=True, blank=True)
    size = models.BigIntegerField(null=True, blank=True)
    metadata = JSONField(null=True, blank=True)
    uploads = JSONField(null=True, blank=True)
    # Database backend process id, used by abort_query().
    pid = models.IntegerField(null=True, blank=True)

    class Meta:
        ordering = ('start_time', )
        verbose_name = _('QueryJob')
        verbose_name_plural = _('QueryJobs')

    @property
    def parameters(self):
        """Dict of the query-specific job parameters."""
        return {
            'schema_name': self.schema_name,
            'table_name': self.table_name,
            'query_language': self.query_language,
            'query': self.query,
            'native_query': self.native_query,
            'actual_query': self.actual_query,
            'queue': self.queue,
            'nrows': self.nrows,
            'size': self.size
        }

    @property
    def formats(self):
        """Mapping of download format key -> content type from settings."""
        return OrderedDict((item['key'], item['content_type']) for item in settings.QUERY_DOWNLOAD_FORMATS)

    @property
    def result_status(self):
        """'OVERFLOW' when a row limit was applied, 'OK' otherwise."""
        return 'OK' if self.max_records is None else 'OVERFLOW'

    @property
    def quote(self):
        return None

    @property
    def time_queue(self):
        """Seconds spent waiting in the queue, or None if not started."""
        if self.start_time and self.creation_time:
            return (self.start_time - self.creation_time).total_seconds()
        else:
            return None

    @property
    def time_query(self):
        """Seconds spent executing, or None if not finished."""
        if self.end_time and self.start_time:
            return (self.end_time - self.start_time).total_seconds()
        else:
            return None

    @property
    def timeout(self):
        """Timeout of the configured queue; 10 s when no queue is set."""
        if self.queue:
            return next((queue['timeout'] for queue in settings.QUERY_QUEUES if queue['key'] == self.queue))
        else:
            return 10

    @property
    def priority(self):
        """Celery priority of the configured queue."""
        return next((queue['priority'] for queue in settings.QUERY_QUEUES if queue['key'] == self.queue))

    @cached_property
    def column_names(self):
        """Names of the result table columns, taken from the job metadata."""
        return [column['name'] for column in self.metadata['columns']]

    def process(self, upload=False):
        """Validates and prepares the job; must be called before run().

        Raises ValidationError when quota, permissions or the query itself
        are invalid. Sets self.is_clean on success.
        """
        # log the query to the query log
        query_logger.info('"%s" %s %s', self.query, self.query_language, self.owner or 'anonymous')
        # check quota and number of active jobs
        check_quota(self)
        check_number_of_active_jobs(self)
        # process schema_name, table_name and response format
        self.schema_name = process_schema_name(self.owner, self.schema_name)
        self.table_name = process_table_name(self.table_name)
        self.response_format = process_response_format(self.response_format)
        if upload:
            self.query = ''
            self.query_language = ''
            self.queue = ''
            self.execution_duration = 0.0
        else:
            self.query_language = process_query_language(self.owner, self.query_language)
            self.queue = process_queue(self.owner, self.queue)
            # (the redundant second process_response_format call was removed;
            # the response format is already processed above)
            # set the execution_duration to the queues timeout
            self.execution_duration = self.timeout
            # log the input query to the debug log
            logger.debug('query = "%s"', self.query)
            # translate the query from adql
            translated_query = translate_query(self.query_language, self.query)
            # log the translated query to the debug log
            logger.debug('translated_query = "%s"', translated_query)
            processor = process_query(translated_query)
            # log the processor output to the debug log
            logger.debug('native_query = "%s"', processor.query)
            logger.debug('processor.keywords = %s', processor.keywords)
            logger.debug('processor.tables = %s', processor.tables)
            logger.debug('processor.columns = %s', processor.columns)
            logger.debug('processor.functions = %s', processor.functions)
            # check permissions
            permission_messages = check_permissions(
                self.owner,
                processor.keywords,
                processor.tables,
                processor.columns,
                processor.functions
            )
            if permission_messages:
                raise ValidationError({
                    'query': permission_messages
                })
            # initialize metadata and store map of aliases
            self.metadata = {
                'display_columns': process_display_columns(processor.display_columns),
                'tables': processor.tables
            }
            # get the native query from the processor (without trailing semicolon)
            self.native_query = processor.query.rstrip(';')
        # set clean flag
        self.is_clean = True

    def run(self):
        """Submits the query task; requires a previous process() call."""
        if not self.is_clean:
            raise Exception('job.process() was not called.')
        if self.phase == self.PHASE_PENDING:
            self.phase = self.PHASE_QUEUED
            self.save()
            # start the submit_query task in a synchronous or asynchronous way
            job_id = str(self.id)
            if not settings.ASYNC:
                logger.info('job %s submitted (sync)' % self.id)
                run_query.apply((job_id, ), task_id=job_id, throw=True)
            else:
                logger.info('job %s submitted (async, queue=query, priority=%s)' % (self.id, self.priority))
                run_query.apply_async((job_id, ), task_id=job_id, queue='query', priority=self.priority)
        else:
            raise ValidationError({
                'phase': ['Job is not PENDING.']
            })

    def run_sync(self):
        """Runs the query synchronously and yields a VOTable stream."""
        adapter = DatabaseAdapter()
        self.actual_query = adapter.build_sync_query(
            self.native_query,
            settings.QUERY_SYNC_TIMEOUT,
            self.max_records
        )
        job_sources = get_job_sources(self)
        # create a stats record for this job
        Record.objects.create(
            time=now(),
            resource_type='QUERY',
            resource={
                'job_id': None,
                'job_type': self.job_type,
                'query': self.query,
                'query_language': self.query_language,
                'sources': job_sources
            },
            client_ip=self.client_ip,
            user=self.owner
        )
        try:
            download_adapter = DownloadAdapter()
            yield from generate_votable(adapter.fetchall(self.actual_query), get_job_columns(self),
                                        table=download_adapter.get_table_name(self.schema_name, self.table_name),
                                        infos=download_adapter.get_infos('OK', self.query, self.query_language, job_sources),
                                        links=download_adapter.get_links(job_sources))
            self.drop_uploads()
        except (OperationalError, ProgrammingError, InternalError, DataError):
            # Fixed: raising StopIteration inside a generator is a
            # RuntimeError under PEP 479 (Python 3.7+); a plain return
            # ends the generator instead.
            return

    def ingest(self, file_path):
        """Queues ingestion of an uploaded file into the job table."""
        if self.phase == self.PHASE_PENDING:
            self.phase = self.PHASE_QUEUED
            self.save()
            if not settings.ASYNC:
                run_ingest.apply((self.id, file_path), throw=True)
            else:
                run_ingest.apply_async((self.id, file_path), queue='download')
        else:
            raise ValidationError({
                'phase': ['Job is not PENDING.']
            })

    def abort(self):
        """Revokes the celery task and aborts a running query."""
        if settings.ASYNC:
            # first, revoke the task in celery, regardless the phase
            revoke(str(self.id))
        current_phase = self.phase
        if current_phase in self.PHASE_ACTIVE:
            # next, set the phase to ABORTED
            self.phase = self.PHASE_ABORTED
            self.save()
            # finally, abort query, this will trigger OperationalError in the run_query task
            if current_phase == self.PHASE_EXECUTING:
                self.abort_query()

    def archive(self):
        """Aborts the job, drops its tables and marks it ARCHIVED."""
        self.abort()
        self.drop_table()
        self.drop_uploads()
        self.phase = self.PHASE_ARCHIVED
        self.nrows = None
        self.size = None
        self.save()

    def rename_table(self, new_table_name):
        """Renames the result table in the database.

        NOTE(review): only metadata['name'] is updated here, not
        self.table_name — presumably handled elsewhere; verify.
        """
        if self.table_name != new_table_name:
            self.metadata['name'] = new_table_name
            self.save()
            task_args = (self.schema_name, self.table_name, new_table_name)
            if not settings.ASYNC:
                rename_table.apply(task_args, throw=True)
            else:
                rename_table.apply_async(task_args)

    def drop_table(self):
        """Drops the result table of this job."""
        task_args = (self.schema_name, self.table_name)
        if not settings.ASYNC:
            drop_table.apply(task_args, throw=True)
        else:
            drop_table.apply_async(task_args)

    def drop_uploads(self):
        """Drops all upload tables registered for this job."""
        if self.uploads:
            # only the table names are needed; the mapped file paths are not
            for table_name in self.uploads:
                task_args = (settings.TAP_UPLOAD, table_name)
                if not settings.ASYNC:
                    drop_table.apply(task_args, throw=True)
                else:
                    drop_table.apply_async(task_args)

    def abort_query(self):
        """Kills the database backend process executing this query."""
        task_args = (self.pid, )
        if not settings.ASYNC:
            abort_query.apply(task_args, throw=True)
        else:
            abort_query.apply_async(task_args)

    def stream(self, format_key):
        """Streams the completed result table in the given format."""
        if self.phase == self.PHASE_COMPLETED:
            return DownloadAdapter().generate(
                format_key,
                self.metadata.get('columns', []),
                sources=self.metadata.get('sources', []),
                schema_name=self.schema_name,
                table_name=self.table_name,
                nrows=self.nrows,
                query_status=self.result_status,
                query=self.query,
                query_language=self.query_language
            )
        else:
            raise ValidationError({
                'phase': ['Job is not COMPLETED.']
            })

    def rows(self, column_names, ordering, page, page_size, search, filters):
        """Returns (count, rows) for one page of the completed result table."""
        if self.phase == self.PHASE_COMPLETED:
            # check if the columns are actually in the jobs table
            errors = {}
            for column_name in column_names:
                if column_name not in self.column_names:
                    errors[column_name] = _('Column not found.')
            if errors:
                raise ValidationError(errors)
            # get database adapter
            adapter = DatabaseAdapter()
            try:
                # query the database for the total number of rows
                count = adapter.count_rows(self.schema_name, self.table_name, column_names, search, filters)
                # query the paginated rowset
                rows = adapter.fetch_rows(self.schema_name, self.table_name, column_names, ordering, page, page_size, search, filters)
                # flatten the list if only one column is retrieved
                if len(column_names) == 1:
                    return count, [element for row in rows for element in row]
                else:
                    return count, rows
            except ProgrammingError:
                return 0, []
        else:
            raise ValidationError({
                'phase': ['Job is not COMPLETED.']
            })

    def columns(self):
        """Returns the column metadata of the result table, or []."""
        if self.metadata:
            return self.metadata.get('columns', [])
        else:
            return []
class DownloadJob(Job):
    """Job that exports a completed QueryJob's result table into a file."""
    objects = JobManager()
    # The parent query whose result table is exported.
    job = models.ForeignKey(
        QueryJob, related_name='downloads', on_delete=models.CASCADE,
        verbose_name=_('QueryJob'),
        help_text=_('QueryJob this DownloadJob belongs to.')
    )
    # Key into settings.QUERY_DOWNLOAD_FORMATS selecting the output format.
    format_key = models.CharField(
        max_length=32,
        verbose_name=_('Format key'),
        help_text=_('Format key for this download.')
    )
    class Meta:
        ordering = ('start_time', )
        verbose_name = _('DownloadJob')
        verbose_name_plural = _('DownloadJobs')
    @property
    def file_path(self):
        """Absolute path of the download file, or None for an unknown format key."""
        if not self.owner:
            username = 'anonymous'
        else:
            username = self.owner.username
        format_config = get_format_config(self.format_key)
        if format_config:
            directory_name = os.path.join(settings.QUERY_DOWNLOAD_DIR, username)
            return os.path.join(directory_name, '%s.%s' % (self.job.table_name, format_config['extension']))
        else:
            return None
    def process(self):
        """Validates that the parent job completed; marks this job clean."""
        if self.job.phase == self.PHASE_COMPLETED:
            self.owner = self.job.owner
        else:
            raise ValidationError({
                'phase': ['Job is not COMPLETED.']
            })
        # set clean flag
        self.is_clean = True
    def run(self):
        """Queues the celery task that writes the download file."""
        if not self.is_clean:
            raise Exception('download_job.process() was not called.')
        if self.phase == self.PHASE_PENDING:
            self.phase = self.PHASE_QUEUED
            self.save()
            download_id = str(self.id)
            if not settings.ASYNC:
                logger.info('download_job %s submitted (sync)' % download_id)
                create_download_file.apply((download_id, ), task_id=download_id, throw=True)
            else:
                logger.info('download_job %s submitted (async, queue=download)' % download_id)
                create_download_file.apply_async((download_id, ), task_id=download_id, queue='download')
        else:
            raise ValidationError({
                'phase': ['Job is not PENDING.']
            })
    def delete_file(self):
        """Removes the download file; a missing file is not an error."""
        try:
            if self.file_path is not None:
                os.remove(self.file_path)
        except OSError:
            pass
class QueryArchiveJob(Job):
    """Job that zips the files referenced by one column of a QueryJob's table."""
    objects = JobManager()
    # The parent query whose column values reference the files to archive.
    job = models.ForeignKey(
        QueryJob, related_name='archives', on_delete=models.CASCADE,
        verbose_name=_('QueryJob'),
        help_text=_('QueryJob this ArchiveJob belongs to.')
    )
    column_name = models.CharField(
        max_length=32,
        verbose_name=_('Column name'),
        help_text=_('Column name for this download.')
    )
    files = JSONField(
        verbose_name=_('Files'),
        help_text=_('List of files in the archive.')
    )
    class Meta:
        ordering = ('start_time', )
        verbose_name = _('QueryArchiveJob')
        # Fixed: the plural previously repeated the singular name,
        # inconsistent with 'QueryJobs'/'DownloadJobs'.
        verbose_name_plural = _('QueryArchiveJobs')
    @property
    def file_path(self):
        """Absolute path of the zip archive for this job."""
        if not self.owner:
            username = 'anonymous'
        else:
            username = self.owner.username
        directory_name = os.path.join(settings.QUERY_DOWNLOAD_DIR, username)
        return os.path.join(directory_name, '%s.%s.zip' % (self.job.table_name, self.column_name))
    def process(self):
        """Validates inputs and collects the list of files to archive."""
        if self.job.phase == self.PHASE_COMPLETED:
            self.owner = self.job.owner
        else:
            raise ValidationError({
                'phase': ['Job is not COMPLETED.']
            })
        if not self.column_name:
            raise ValidationError({
                'column_name': [_('This field may not be blank.')]
            })
        if self.column_name not in self.job.column_names:
            raise ValidationError({
                'column_name': [_('Unknown column "%s".') % self.column_name]
            })
        # get database adapter and query the full (unpaginated) rowset
        rows = DatabaseAdapter().fetch_rows(self.job.schema_name, self.job.table_name, [self.column_name], page_size=0)
        # prepare list of files for this job
        files = []
        for row in rows:
            file_path = row[0]
            # append the file to the list of files if it exists
            if file_path and check_file(self.owner, file_path):
                files.append(file_path)
            else:
                raise ValidationError({
                    'files': [_('One or more of the files cannot be found.')]
                })
        # set files for this job
        self.files = files
        # set clean flag
        self.is_clean = True
    def run(self):
        """Queues the celery task that creates the archive file."""
        if not self.is_clean:
            # Fixed: the message previously said 'download_job.process()',
            # copy-pasted from DownloadJob.
            raise Exception('archive_job.process() was not called.')
        if self.phase == self.PHASE_PENDING:
            self.phase = self.PHASE_QUEUED
            self.save()
            archive_id = str(self.id)
            if not settings.ASYNC:
                logger.info('archive_job %s submitted (sync)' % archive_id)
                create_archive_file.apply((archive_id, ), task_id=archive_id, throw=True)
            else:
                logger.info('archive_job %s submitted (async, queue=download)' % archive_id)
                create_archive_file.apply_async((archive_id, ), task_id=archive_id, queue='download')
        else:
            raise ValidationError({
                'phase': ['Job is not PENDING.']
            })
    def delete_file(self):
        """Removes the archive file; a missing file is not an error."""
        try:
            os.remove(self.file_path)
        except OSError:
            pass
class Example(models.Model):
    """A predefined example query presented to users in the query interface."""
    objects = ExampleManager()
    # Manual ordering key; lower values appear first (see Meta.ordering).
    order = models.IntegerField(
        null=True, blank=True,
        verbose_name=_('Order'),
        help_text=_('Position in lists.')
    )
    # Display name of the example.
    name = models.CharField(
        max_length=256,
        verbose_name=_('Name'),
        help_text=_('Identifier of the example.')
    )
    description = models.TextField(
        null=True, blank=True,
        verbose_name=_('Description'),
        help_text=_('A brief description of the example to be displayed in the user interface.')
    )
    # e.g. the SQL dialect this example is written for.
    query_language = models.CharField(
        max_length=16,
        verbose_name=_('Query language'),
        help_text=_('The query language for this example.')
    )
    query_string = models.TextField(
        verbose_name=_('Query string'),
        help_text=_('The query string (SQL) for this example.')
    )
    # Visibility: one of ACCESS_LEVEL_CHOICES.
    access_level = models.CharField(
        max_length=8, choices=ACCESS_LEVEL_CHOICES,
        verbose_name=_('Access level')
    )
    groups = models.ManyToManyField(
        Group, blank=True,
        verbose_name=_('Groups'),
        help_text=_('The groups which have access to the examples.')
    )
    class Meta:
        ordering = ('order',)
        verbose_name = _('Example')
        verbose_name_plural = _('Examples')
    def __str__(self):
        return self.name
| |
import sys
import subprocess
import threading
import logging
import importlib
from collections import OrderedDict, namedtuple
from django.core.management import call_command
from django.conf import settings
from django.apps import apps
from biohub.utils import module as module_util
from biohub.utils.collections import unique
from biohub.core.conf import settings as biohub_settings, dump_config
from .config import PluginConfig
from . import exceptions
logger = logging.getLogger('biohub.plugins')
# Attributes every plugin config class must declare (see validate_plugin_config).
REQUIRED_PROPERTIES = ('title', 'author', 'description',)
# Immutable record of a plugin's descriptive metadata.
PluginInfo = namedtuple('PluginInfo', REQUIRED_PROPERTIES)
def validate_plugin_config(plugin_name, config_class):
    """
    Ensure `config_class` is a usable plugin configuration.

    A valid config class subclasses `biohub.core.plugins.PluginConfig` and
    defines every property listed in `REQUIRED_PROPERTIES`; otherwise an
    `InstallationError` is raised.
    """
    if not issubclass(config_class, PluginConfig):
        raise exceptions.InstallationError(
            "The default config class of plugin '%s' is not a "
            "subclass of `biohub.core.plugins.PluginConfig`."
            % plugin_name)
    declared = set(dir(config_class))
    missing_fields = set(REQUIRED_PROPERTIES) - declared
    if missing_fields:
        raise exceptions.InstallationError(
            "Required fields %s missing in config of plugin '%s'"
            % (', '.join(missing_fields), plugin_name))
class PluginManager(object):
    """Central registry that installs, removes and tracks biohub plugins."""

    def __init__(self):
        # Mapping of plugin name -> populated AppConfig instance.
        self.plugins = OrderedDict()
        # Mapping of plugin name -> PluginInfo (title/author/description).
        self.plugin_infos = OrderedDict()
        self._install_lock = threading.Lock()
        self._db_lock = threading.Lock()

    @property
    def installing(self):
        """
        Returns a bool indicating whether plugin list is mutating.
        """
        # BUG FIX: `locked` is a method — the original returned the bound
        # method object (always truthy) instead of calling it.
        return self._install_lock.locked()

    @property
    def migrating(self):
        """
        Returns a bool indicating whether there are any plugins migrating their
        models.
        """
        # BUG FIX: call locked() instead of returning the bound method.
        return self._db_lock.locked()

    @property
    def available_plugins(self):
        """
        An alias to biohub_settings.BIOHUB_PLUGINS
        """
        if not hasattr(self, '_available_plugins'):
            self._available_plugins = biohub_settings.BIOHUB_PLUGINS
        return self._available_plugins

    @property
    def apps_to_populate(self):
        """
        Returns the names of apps to be populated.
        """
        return unique(settings.INSTALLED_APPS + self.available_plugins)

    def populate_plugins(self, plugin_names):
        """
        Update plugins storage after new plugins installed.
        """
        for app_config in apps.app_configs.values():
            if app_config.name in plugin_names:
                self._populate_plugin(app_config.name, app_config)

    def _populate_plugin(self, plugin_name, plugin_config):
        """
        Populate a single plugin.
        """
        self.plugins[plugin_name] = plugin_config
        properties = (getattr(plugin_config, pname)
                      for pname in REQUIRED_PROPERTIES)
        self.plugin_infos[plugin_name] = PluginInfo(*properties)

    def _validate_plugins(self, plugin_names):
        """
        Filter and wipe out existing plugins.

        Returns only the names that are not yet installed, raising
        `InstallationError` for any invalid module path or config class.
        """
        plugin_names = [plugin for plugin in plugin_names
                        if plugin not in self.available_plugins]
        for name in plugin_names:
            if not module_util.is_valid_module_path(name):
                raise exceptions.InstallationError(
                    "'%s' is not a valid dot-separated python module path."
                    % name)
            # Get the plugin config class
            plugin_config_path = module_util.module_from_path(name)\
                .default_app_config
            plugin_config_class = module_util.object_from_path(
                plugin_config_path)
            validate_plugin_config(name, plugin_config_class)
        return plugin_names

    def _invalidate_urlconf(self):
        """
        To invalidate url patterns after plugin list mutated.

        The function will do the following things:

        + invalidate resolver's LRU cache (use `.cache_clear` provided by
          `lru_cache`)
        + reload main urlconf module and biohub url patterns registration
          module
        + reload `urls.py` in each app, using a force-reload version of
          `autodiscover_module`
        + override default resolver's `urlconf_module` and `url_patterns`
          attributes, which are cached property and must be explicitly
          assigned
        """
        from django.urls.resolvers import get_resolver
        import biohub.core.routes
        import biohub.main.urls
        try:
            get_resolver.cache_clear()
            importlib.reload(biohub.core.routes)
            main_urls = importlib.reload(biohub.main.urls)
            module_util.autodiscover_modules('urls')
            resolver = get_resolver()
            resolver.urlconf_module = main_urls
            resolver.url_patterns = getattr(
                main_urls, "urlpatterns", main_urls)
        except Exception as e:
            raise exceptions.URLConfError(e)

    def remove(self, plugin_names,
               update_config=False,
               invalidate_urlconf=True):
        """
        Remove a list of plugins specified by `plugin_names`.

        Returns the names that were actually removed.
        """
        with self._install_lock:
            removed = []
            for plugin_name in plugin_names:
                try:
                    self.available_plugins.remove(plugin_name)
                    removed.append(plugin_name)
                except ValueError:
                    pass
            # halt if no plugins to be REALLY removed
            if not removed:
                return removed
            if apps.ready:
                apps.app_configs = OrderedDict()
                apps.apps_ready = apps.models_ready = apps.ready = False
                try:
                    apps.clear_cache()
                    apps.populate(self.apps_to_populate)
                except Exception as e:
                    raise exceptions.RemovalError(e)
            if invalidate_urlconf:
                self._invalidate_urlconf()
            # update plugin registries
            for plugin_name in removed:
                # BUG FIX: the AppConfig entry in `self.plugins` was leaked;
                # drop it together with the plugin info record.
                self.plugins.pop(plugin_name, None)
                self.plugin_infos.pop(plugin_name, None)
            if update_config:
                dump_config()
            return removed

    def install(self, plugin_names,
                update_config=False,
                invalidate_urlconf=True,
                migrate_database=False, migrate_options=None):
        """
        Install a list of plugins specified by `plugin_names`.

        `plugin_names` should be a list of dot-separated python module path,
        existing plugins will be ignored.

        By setting `invalidate_urlconf` to True (default), biohub will
        automatically update the URLConf after the plugins populated.

        By setting `migrate_database` to True (default to False), biohub will
        automatically migrate models of the plugins. Extra migration options
        can be specified using `migrate_options` (should be a dict).
        """
        plugin_names = self._validate_plugins(plugin_names)
        with self._install_lock:
            if not plugin_names:
                return plugin_names
            # Update django apps list.
            if not apps.ready:
                raise exceptions.InstallationError(
                    "Django app registry isn't ready yet.")
            apps.app_configs = OrderedDict()
            apps.apps_ready = apps.models_ready = apps.ready = False
            try:
                apps.clear_cache()
                apps.populate(self.apps_to_populate + plugin_names)
            except Exception as e:
                raise exceptions.InstallationError(e)
            # Invalidate URLConf
            if invalidate_urlconf:
                self._invalidate_urlconf()
            # Migrate database
            if migrate_database:
                self.prepare_database(plugin_names, **(migrate_options or {}))
            self.populate_plugins(plugin_names)
            self.available_plugins.extend(plugin_names)
            if update_config:
                dump_config()
            return plugin_names

    def prepare_database(self, plugin_names, new_process=False,
                         no_input=False, no_output=False,
                         verbosity=0, test=False):
        """
        The function is to migrate models of plugins specified by
        `plugin_names`.

        Note that it will not check whether the plugins are installed or not.
        The function is actually a wrapper of biohub command `migrateplugin`.

        By setting `new_process` to True (default to False), the function will
        start up the migration in a subprocess instead of directly calling
        `call_command`. This may be useful while testing, especially if your
        test runner bans migration.

        The rest arguments has the same meaning as those in command
        `migrateplugin`.
        """
        with self._db_lock:
            try:
                for plugin in plugin_names:
                    self._migrate_plugin(
                        plugin, new_process,
                        no_input, no_output,
                        verbosity, test)
            except exceptions.DatabaseError as e:
                raise e
            except Exception as e:
                raise exceptions.DatabaseError(e)

    def _migrate_plugin(self, plugin_name, new_process=False,
                        no_input=False, no_output=False,
                        verbosity=0, test=False):
        """Run `migrateplugin` for one plugin, in-process or as a subprocess."""
        if new_process:
            args = filter(bool, [
                'manage.py',
                'migrateplugin',
                plugin_name,
                '--verbosity=%s' % verbosity,
                '--no-input' if no_input else '',
                '--no-output' if no_output else '',
                '--test' if test else ''
            ])
            p = subprocess.Popen(
                list(args),
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()
            sys.stdout.write(stdout.decode('utf-8'))
            sys.stderr.write(stderr.decode('utf-8'))
            if p.returncode != 0:
                # BUG FIX: 'processreturns' -> 'process returns'
                raise exceptions.DatabaseError(
                    'Migration process returns non-zero exit code.')
        else:
            call_command(
                'migrateplugin', plugin_name,
                interactive=no_input,
                no_output=no_output,
                verbosity=verbosity,
                test=test)
# Shared module-level PluginManager instance.
manager = PluginManager()
| |
#
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
OMCI Message support
"""
import sys
import arrow
from twisted.internet import reactor, defer
from twisted.internet.defer import TimeoutError, CancelledError, failure, fail, succeed, inlineCallbacks
from common.frameio.frameio import hexify
from voltha.extensions.omci.omci import *
from voltha.extensions.omci.omci_me import OntGFrame, OntDataFrame, SoftwareImageFrame
from voltha.extensions.omci.me_frame import MEFrame
from voltha.extensions.omci.omci_defs import EntityOperations, ReasonCodes
from common.event_bus import EventBusClient
from enum import IntEnum
from binascii import hexlify
def hexify(buffer):
    """Return a hexadecimal string encoding of input buffer"""
    # NOTE(review): this intentionally or not shadows the `hexify` imported
    # from common.frameio.frameio above — confirm which one is wanted.
    hex_digits = ['%02x' % ord(octet) for octet in buffer]
    return ''.join(hex_digits)
# Default Rx timeout applied to a Tx request by send()
DEFAULT_OMCI_TIMEOUT = 3  # Seconds
# Upper bound accepted for a caller-supplied timeout in send()
MAX_OMCI_REQUEST_AGE = 60  # Seconds
DEFAULT_OMCI_DOWNLOAD_SECTION_SIZE = 31  # Bytes
# Keys used in messages published on the event bus
CONNECTED_KEY = 'connected'
TX_REQUEST_KEY = 'tx-request'
RX_RESPONSE_KEY = 'rx-response'
# Attribute key under which undecodable ME payloads are stashed (_decode_unknown_me)
UNKNOWN_CLASS_ATTRIBUTE_KEY = 'voltha-unknown-blob'
class OmciCCRxEvents(IntEnum):
    """Rx event types published on the OMCI-CC event bus (see event_bus_topic)."""
    AVC_Notification = 0
    MIB_Upload = 1
    MIB_Upload_Next = 2
    Create = 3
    Delete = 4
    Set = 5
    Alarm_Notification = 6
    Test_Result = 7
    MIB_Reset = 8
    Connectivity = 9
    Get_ALARM_Get = 10
    Get_ALARM_Get_Next = 11
    Start_Software_Download = 12
    Download_Section = 13
    End_Software_Download = 14
    Activate_Software = 15
    # BUG FIX: was 15, which silently made Commit_Software an enum alias of
    # Activate_Software, so Commit events were published on the Activate topic.
    Commit_Software = 16
# abbreviations — short aliases used throughout this module
OP = EntityOperations
RxEvent = OmciCCRxEvents
class OMCI_CC(object):
    """ Handle OMCI Communication Channel specifics for Adtran ONUs"""
    # TID ranges; low and high priority requests use disjoint TID spaces
    MIN_OMCI_TX_ID_LOW_PRIORITY = 0x0001  # 2 Octets max
    MAX_OMCI_TX_ID_LOW_PRIORITY = 0x7FFF  # 2 Octets max
    MIN_OMCI_TX_ID_HIGH_PRIORITY = 0x8000  # 2 Octets max
    MAX_OMCI_TX_ID_HIGH_PRIORITY = 0xFFFF  # 2 Octets max
    # Indices into the per-priority lists (_tx_tid, _tx_request, _pending, ...)
    LOW_PRIORITY = 0
    HIGH_PRIORITY = 1
    # Offset into some tuples for pending lists and tx in progress
    # Pending-queue entry layout: (deferred, tx_frame, timeout, retry)
    PENDING_DEFERRED = 0
    PENDING_FRAME = 1
    PENDING_TIMEOUT = 2
    PENDING_RETRY = 3
    # In-progress entry layout: (timestamp, deferred, frame, timeout, retry, delayedCall)
    REQUEST_TIMESTAMP = 0
    REQUEST_DEFERRED = 1
    REQUEST_FRAME = 2
    REQUEST_TIMEOUT = 3
    REQUEST_RETRY = 4
    REQUEST_DELAYED_CALL = 5
    # Map of response frame message_id -> event-bus Rx event (see _publish_rx_frame)
    _frame_to_event_type = {
        OmciMibResetResponse.message_id: RxEvent.MIB_Reset,
        OmciMibUploadResponse.message_id: RxEvent.MIB_Upload,
        OmciMibUploadNextResponse.message_id: RxEvent.MIB_Upload_Next,
        OmciCreateResponse.message_id: RxEvent.Create,
        OmciDeleteResponse.message_id: RxEvent.Delete,
        OmciSetResponse.message_id: RxEvent.Set,
        OmciGetAllAlarmsResponse.message_id: RxEvent.Get_ALARM_Get,
        OmciGetAllAlarmsNextResponse.message_id: RxEvent.Get_ALARM_Get_Next
    }
def __init__(self, adapter_agent, device_id, me_map=None,
             clock=None):
    """
    Initialize the OMCI communication channel for one ONU.

    :param adapter_agent: adapter agent used to look up the ONU device/proxy
    :param device_id: (str) ONU device ID, used for logging and event topics
    :param me_map: optional class-ID -> ME class map installed during decode
    :param clock: optional reactor-like clock override (useful for unit tests)
    """
    self.log = structlog.get_logger(device_id=device_id)
    self._adapter_agent = adapter_agent
    self._device_id = device_id
    self._proxy_address = None
    self._enabled = False
    self._extended_messaging = False
    self._me_map = me_map
    # Use the injected clock when supplied so tests can control scheduling
    if clock is None:
        self.reactor = reactor
    else:
        self.reactor = clock
    # Support 2 levels of priority since only baseline message set supported
    self._tx_tid = [OMCI_CC.MIN_OMCI_TX_ID_LOW_PRIORITY, OMCI_CC.MIN_OMCI_TX_ID_HIGH_PRIORITY]
    self._tx_request = [None, None]  # Tx in progress (timestamp, defer, frame, timeout, retry, delayedCall)
    self._pending = [list(), list()]  # pending queue (deferred, tx_frame, timeout, retry)
    self._rx_response = [None, None]
    # Statistics
    self._tx_frames = 0
    self._rx_frames = 0
    self._rx_unknown_tid = 0  # Rx OMCI with no Tx TID match
    self._rx_onu_frames = 0  # Autonomously generated ONU frames
    self._rx_onu_discards = 0  # Autonomously generated ONU unknown message types
    self._rx_timeouts = 0
    self._rx_late = 0  # Frame response received after timeout on Tx
    self._rx_unknown_me = 0  # Number of managed entities Rx without a decode definition
    self._tx_errors = 0  # Exceptions during tx request
    self._consecutive_errors = 0  # Rx & Tx errors in a row, a good RX resets this to 0
    self._reply_min = sys.maxint  # Fastest successful tx -> rx (NOTE: sys.maxint is Python 2 only)
    self._reply_max = 0  # Longest successful tx -> rx
    self._reply_sum = 0.0  # Total seconds for successful tx->rx (float for average)
    self._max_hp_tx_queue = 0  # Maximum size of high priority tx pending queue
    self._max_lp_tx_queue = 0  # Maximum size of low priority tx pending queue
    self.event_bus = EventBusClient()
    # If a list of custom ME Entities classes were provided, insert them into
    # main class_id to entity map.
    # TODO: If this class becomes hidden from the ONU DA, move this to the OMCI State Machine runner
def __str__(self):
    """Human-readable identification of this OMCI channel."""
    label = "OMCISupport: {}"
    return label.format(self._device_id)
def _get_priority_index(self, high_priority):
    """ Centralized logic to help make extended message support easier in the future"""
    if high_priority and not self._extended_messaging:
        return OMCI_CC.HIGH_PRIORITY
    return OMCI_CC.LOW_PRIORITY
def _tid_is_high_priority(self, tid):
    """ Centralized logic to help make extended message support easier in the future"""
    if self._extended_messaging:
        return False
    return OMCI_CC.MIN_OMCI_TX_ID_HIGH_PRIORITY <= tid <= OMCI_CC.MAX_OMCI_TX_ID_HIGH_PRIORITY
@staticmethod
def event_bus_topic(device_id, event):
    """
    Get the topic name for a given event Frame Type

    :param device_id: (str) ONU Device ID
    :param event: (OmciCCRxEvents) Type of event
    :return: (str) Topic string
    """
    assert event in OmciCCRxEvents, \
        'Event {} is not an OMCI-CC Rx Event'.format(event.name)
    topic = 'omci-rx:{}:{}'.format(device_id, event.name)
    return topic
@property
def enabled(self):
    # True once the channel has been started via the `enabled` setter
    return self._enabled
@enabled.setter
def enabled(self, value):
    """
    Enable/disable the OMCI Communications Channel

    :param value: (boolean) True to enable, False to disable
    """
    assert isinstance(value, bool), 'enabled is a boolean'
    if self._enabled == value:
        return
    self._enabled = value
    if value:
        self._start()
    else:
        self._stop()
@property
def tx_frames(self):
    # Total OMCI request frames transmitted
    return self._tx_frames
@property
def rx_frames(self):
    # OMCI response frames received (excludes autonomous ONU frames)
    return self._rx_frames
@property
def rx_unknown_tid(self):
    return self._rx_unknown_tid  # Tx TID not found
@property
def rx_unknown_me(self):
    # Responses whose managed entity had no decode definition
    return self._rx_unknown_me
@property
def rx_onu_frames(self):
    # Autonomously generated ONU frames (TID == 0 or test results)
    return self._rx_onu_frames
@property
def rx_onu_discards(self):
    return self._rx_onu_discards  # Attribute Value change autonomous overflows
@property
def rx_timeouts(self):
    # Requests that timed out waiting for their response
    return self._rx_timeouts
@property
def rx_late(self):
    # Responses that arrived after their request was serviced/timed out
    return self._rx_late
@property
def tx_errors(self):
    # Exceptions raised while submitting Tx requests
    return self._tx_errors
@property
def consecutive_errors(self):
    # Rx & Tx errors in a row; a good Rx resets this to 0
    return self._consecutive_errors
@property
def reply_min(self):
    return int(round(self._reply_min * 1000.0))  # Milliseconds
@property
def reply_max(self):
    return int(round(self._reply_max * 1000.0))  # Milliseconds
@property
def reply_average(self):
    # Mean successful tx->rx round-trip time
    avg = self._reply_sum / self._rx_frames if self._rx_frames > 0 else 0.0
    return int(round(avg * 1000.0))  # Milliseconds
@property
def hp_tx_queue_len(self):
    # Current depth of the high-priority pending queue
    return len(self._pending[OMCI_CC.HIGH_PRIORITY])
@property
def lp_tx_queue_len(self):
    # Current depth of the low-priority pending queue
    return len(self._pending[OMCI_CC.LOW_PRIORITY])
@property
def max_hp_tx_queue(self):
    # High-water mark of the high-priority pending queue
    return self._max_hp_tx_queue
@property
def max_lp_tx_queue(self):
    # High-water mark of the low-priority pending queue
    return self._max_lp_tx_queue
def _start(self):
    """
    Start the OMCI Communications Channel
    """
    assert self._enabled, 'Start should only be called if enabled'
    # Drop anything left over from a previous session before (re)binding
    self.flush()
    onu_device = self._adapter_agent.get_device(self._device_id)
    self._proxy_address = onu_device.proxy_address
def _stop(self):
    """
    Stop the OMCI Communications Channel
    """
    assert not self._enabled, 'Stop should only be called if disabled'
    # Cancel all in-flight and queued requests, then release the proxy
    self.flush()
    self._proxy_address = None
def _receive_onu_message(self, rx_frame):
    """ Autonomously generated ONU frame Rx handler"""
    self.log.debug('rx-onu-frame', frame_type=type(rx_frame))
    msg_type = rx_frame.fields['message_type']
    self._rx_onu_frames += 1
    msg = {TX_REQUEST_KEY: None,
           RX_RESPONSE_KEY: rx_frame}
    # Dispatch table: autonomous message type -> event-bus Rx event
    autonomous_events = {
        EntityOperations.AlarmNotification.value: RxEvent.Alarm_Notification,
        EntityOperations.AttributeValueChange.value: RxEvent.AVC_Notification,
        EntityOperations.TestResult.value: RxEvent.Test_Result,
    }
    event = autonomous_events.get(msg_type)
    if event is None:
        self.log.warn('onu-unsupported-autonomous-message', type=msg_type)
        self._rx_onu_discards += 1
        return
    topic = OMCI_CC.event_bus_topic(self._device_id, event)
    self.reactor.callLater(0, self.event_bus.publish, topic, msg)
def _update_rx_tx_stats(self, now, ts):
    """Fold one successful round-trip time into min/max/sum statistics.

    :param now: (arrow) time the response was received
    :param ts: (float) epoch timestamp at which the request was transmitted
    :return: (float) elapsed seconds for this round trip
    """
    elapsed = (now - arrow.Arrow.utcfromtimestamp(ts)).total_seconds()
    self._reply_sum += elapsed
    self._reply_min = min(self._reply_min, elapsed)
    self._reply_max = max(self._reply_max, elapsed)
    return elapsed
def receive_message(self, msg):
    """
    Receive and OMCI message from the proxy channel to the OLT.

    Call this from your ONU Adapter on a new OMCI Rx on the proxy channel
    :param msg: (str) OMCI binary message (used as input to Scapy packet decoder)
    """
    if not self.enabled:
        return
    try:
        now = arrow.utcnow()
        d = None
        # NOTE: Since we may need to do an independent ME map on a per-ONU basis
        #       save the current value of the entity_id_to_class_map, then
        #       replace it with our custom one before decode, and then finally
        #       restore it later. Tried other ways but really made the code messy.
        saved_me_map = omci_entities.entity_id_to_class_map
        omci_entities.entity_id_to_class_map = self._me_map
        try:
            rx_frame = msg if isinstance(msg, OmciFrame) else OmciFrame(msg)
        except KeyError as e:
            # Unknown, Unsupported, or vendor-specific ME. Key is the unknown classID
            self.log.debug('frame-decode-key-error', msg=hexlify(msg), e=e)
            rx_frame = self._decode_unknown_me(msg)
            self._rx_unknown_me += 1
        except Exception as e:
            self.log.exception('frame-decode', msg=hexlify(msg), e=e)
            return
        finally:
            omci_entities.entity_id_to_class_map = saved_me_map  # Always restore it.
        rx_tid = rx_frame.fields['transaction_id']
        msg_type = rx_frame.fields['message_type']
        # Filter the Test Result frame and route through receive onu
        # message method.
        if rx_tid == 0 or msg_type == EntityOperations.TestResult.value:
            return self._receive_onu_message(rx_frame)
        # Previously unreachable if this is the very first round-trip Rx or we
        # have been running consecutive errors
        if self._rx_frames == 0 or self._consecutive_errors != 0:
            self.reactor.callLater(0, self._publish_connectivity_event, True)
        self._rx_frames += 1
        self._consecutive_errors = 0
        try:
            high_priority = self._tid_is_high_priority(rx_tid)
            index = self._get_priority_index(high_priority)
            # (timestamp, defer, frame, timeout, retry, delayedCall)
            last_tx_tuple = self._tx_request[index]
            if last_tx_tuple is None or \
                    last_tx_tuple[OMCI_CC.REQUEST_FRAME].fields.get('transaction_id') != rx_tid:
                # Possible late Rx on a message that timed-out
                self._rx_unknown_tid += 1
                self._rx_late += 1
                return
            ts, d, tx_frame, timeout, retry, dc = last_tx_tuple
            if dc is not None and not dc.cancelled and not dc.called:
                dc.cancel()
            _secs = self._update_rx_tx_stats(now, ts)
            # Late arrival already serviced by a timeout?
            if d.called:
                self._rx_late += 1
                return
        except Exception as e:
            self.log.exception('frame-match', msg=hexlify(msg), e=e)
            if d is not None:
                return d.errback(failure.Failure(e))
            return
        # Publish Rx event to listeners in a different task.
        # BUG FIX: use the injected clock (self.reactor) rather than the global
        # reactor so tests supplying a fake clock observe this call.
        self.reactor.callLater(0, self._publish_rx_frame, tx_frame, rx_frame)
        # begin success callback chain (will cancel timeout and queue next Tx message)
        self._rx_response[index] = rx_frame
        d.callback(rx_frame)
    except Exception as e:
        self.log.exception('rx-msg', e=e)
def _decode_unknown_me(self, msg):
    """
    Decode an ME for an unsupported class ID. This should only occur for a subset
    of message types (Get, Set, MIB Upload Next, ...) and they should only be
    responses as well.

    There are some times below that are commented out. For VOLTHA 2.0, it is
    expected that any get, set, create, delete for unique (often vendor) MEs
    will be coded by the ONU utilizing it and supplied to OpenOMCI as a
    vendor-specific ME during device initialization.

    :param msg: (str) Binary data
    :return: (OmciFrame) resulting frame
    :raises TypeError: if the message type has no unknown-ME decode support
    """
    from struct import unpack
    (tid, msg_type, framing) = unpack('!HBB', msg[0:4])
    assert framing == 0xa, 'Only basic OMCI framing supported at this time'
    msg = msg[4:]
    # TODO: Commented out items below are future work (not expected for VOLTHA v2.0)
    (msg_class, kwargs) = {
        # OmciCreateResponse.message_id: (OmciCreateResponse, None),
        # OmciDeleteResponse.message_id: (OmciDeleteResponse, None),
        # OmciSetResponse.message_id: (OmciSetResponse, None),
        # OmciGetResponse.message_id: (OmciGetResponse, None),
        # OmciGetAllAlarmsNextResponse.message_id: (OmciGetAllAlarmsNextResponse, None),
        OmciMibUploadNextResponse.message_id: (OmciMibUploadNextResponse,
                                               {
                                                   'entity_class': unpack('!H', msg[0:2])[0],
                                                   'entity_id': unpack('!H', msg[2:4])[0],
                                                   'object_entity_class': unpack('!H', msg[4:6])[0],
                                                   'object_entity_id': unpack('!H', msg[6:8])[0],
                                                   'object_attributes_mask': unpack('!H', msg[8:10])[0],
                                                   'object_data': {
                                                       UNKNOWN_CLASS_ATTRIBUTE_KEY: hexlify(msg[10:-4])
                                                   },
                                               }),
        # OmciAlarmNotification.message_id: (OmciAlarmNotification, None),
        OmciAttributeValueChange.message_id: (OmciAttributeValueChange,
                                              {
                                                  'entity_class': unpack('!H', msg[0:2])[0],
                                                  'entity_id': unpack('!H', msg[2:4])[0],
                                                  'data': {
                                                      UNKNOWN_CLASS_ATTRIBUTE_KEY: hexlify(msg[4:-8])
                                                  },
                                              }),
        # OmciTestResult.message_id: (OmciTestResult, None),
    }.get(msg_type, None)
    if msg_class is None:
        # BUG FIX: the original passed the format string and msg_type as two
        # separate TypeError args, so the '{}' placeholder was never filled in
        # (and 'Unsupport' was misspelled).
        raise TypeError('Unsupported Message Type for Unknown Decode: {}'.format(msg_type))
    return OmciFrame(transaction_id=tid, message_type=msg_type,
                     omci_message=msg_class(**kwargs))
def _publish_rx_frame(self, tx_frame, rx_frame):
    """
    Notify listeners of successful response frame

    :param tx_frame: (OmciFrame) Original request frame
    :param rx_frame: (OmciFrame) Response frame
    """
    if not self._enabled or not isinstance(rx_frame, OmciFrame):
        return
    frame_type = rx_frame.fields['omci_message'].message_id
    event_type = OMCI_CC._frame_to_event_type.get(frame_type)
    if event_type is None:
        # No event mapping for this response type; nothing to publish
        return
    topic = OMCI_CC.event_bus_topic(self._device_id, event_type)
    msg = {TX_REQUEST_KEY: tx_frame,
           RX_RESPONSE_KEY: rx_frame}
    self.event_bus.publish(topic=topic, msg=msg)
def _publish_connectivity_event(self, connected):
    """
    Notify listeners of Rx/Tx connectivity over OMCI

    :param connected: (bool) True if connectivity transitioned from unreachable
                             to reachable
    """
    if not self._enabled:
        return
    topic = OMCI_CC.event_bus_topic(self._device_id,
                                    RxEvent.Connectivity)
    self.event_bus.publish(topic=topic, msg={CONNECTED_KEY: connected})
def flush(self):
    """Flush/cancel in active or pending Tx requests"""
    requests = []
    for priority in {OMCI_CC.HIGH_PRIORITY, OMCI_CC.LOW_PRIORITY}:
        # Detach the in-progress request (if any) for this priority first
        next_frame, self._tx_request[priority] = self._tx_request[priority], None
        if next_frame is not None:
            requests.append((next_frame[OMCI_CC.REQUEST_DEFERRED], next_frame[OMCI_CC.REQUEST_DELAYED_CALL]))
        # Pending entries have no delayed (timeout) call yet, hence None
        requests += [(next_frame[OMCI_CC.PENDING_DEFERRED], None)
                     for next_frame in self._pending[priority]]
        self._pending[priority] = list()
    # Cancel them...
    def cleanup_unhandled_error(_):
        pass  # So the cancel below does not flag an unhandled error
    for d, dc in requests:
        if d is not None and not d.called:
            d.addErrback(cleanup_unhandled_error)
            d.cancel()
        if dc is not None and not dc.called and not dc.cancelled:
            dc.cancel()
def _get_tx_tid(self, high_priority=False):
    """
    Get the next Transaction ID for a tx.  Note TID=0 is reserved
    for autonomously generated messages from an ONU

    :return: (int) TID
    """
    # High-priority TIDs only exist for the baseline message set
    if high_priority and not self._extended_messaging:
        index = OMCI_CC.HIGH_PRIORITY
        min_tid = OMCI_CC.MIN_OMCI_TX_ID_HIGH_PRIORITY
        max_tid = OMCI_CC.MAX_OMCI_TX_ID_HIGH_PRIORITY
    else:
        index = OMCI_CC.LOW_PRIORITY
        min_tid = OMCI_CC.MIN_OMCI_TX_ID_LOW_PRIORITY
        max_tid = OMCI_CC.MAX_OMCI_TX_ID_LOW_PRIORITY
    tx_tid = self._tx_tid[index]
    self._tx_tid[index] = tx_tid + 1
    if self._tx_tid[index] > max_tid:
        # Wrap around to the start of this priority's TID range
        self._tx_tid[index] = min_tid
    return tx_tid
def _request_failure(self, value, tx_tid, high_priority):
    """
    Handle a transmit failure. Rx Timeouts are handled on the 'dc' deferred and
    will call a different method that may retry if requested.  This routine
    will be called after the final (if any) timeout or other error

    :param value: (Failure) Twisted failure
    :param tx_tid: (int) Associated Tx TID
    :param high_priority: (bool) True if high-priority queue
    """
    index = self._get_priority_index(high_priority)
    if self._tx_request[index] is not None:
        tx_frame = self._tx_request[index][OMCI_CC.REQUEST_FRAME]
        tx_frame_tid = tx_frame.fields['transaction_id']
        if tx_frame_tid == tx_tid:
            timeout = self._tx_request[index][OMCI_CC.REQUEST_TIMEOUT]
            dc = self._tx_request[index][OMCI_CC.REQUEST_DELAYED_CALL]
            self._tx_request[index] = None
            if dc is not None and not dc.called and not dc.cancelled:
                dc.cancel()
            if isinstance(value, failure.Failure):
                value.trap(CancelledError)
                self._rx_timeouts += 1
                self._consecutive_errors += 1
                if self._consecutive_errors == 1:
                    # BUG FIX: use the injected clock (self.reactor) so tests
                    # with a fake clock observe the connectivity event.
                    self.reactor.callLater(0, self._publish_connectivity_event, False)
                self.log.debug('timeout', tx_id=tx_tid, timeout=timeout)
                value = failure.Failure(TimeoutError(timeout, "Deferred"))
    else:
        # Search pending queue. This may be a cancel coming in from the original
        # task that requested the Tx. If found, remove it from the pending queue.
        # BUG FIX: the original loop shadowed `index`, called .get() on a tuple
        # (tuples have no .get), and read the PENDING_DEFERRED slot where it
        # needed the PENDING_FRAME slot to compare transaction IDs.
        for position, request in enumerate(self._pending[index]):
            frame = request[OMCI_CC.PENDING_FRAME]
            if frame.fields.get('transaction_id') == tx_tid:
                self._pending[index].pop(position)
                break
    self._send_next_request(high_priority)
    return value
def _request_success(self, rx_frame, high_priority):
    """
    Handle transmit success (a matching Rx was received)

    :param rx_frame: (OmciFrame) OMCI response frame with matching TID
    :return: (OmciFrame) OMCI response frame with matching TID
    """
    index = self._get_priority_index(high_priority)
    if rx_frame is None:
        # An earlier callback consumed the frame; recover the saved response
        rx_frame = self._rx_response[index]
    rx_tid = rx_frame.fields.get('transaction_id')
    if rx_tid is not None:
        current = self._tx_request[index]
        if current is None:
            self._rx_late += 1
        else:
            tx_tid = current[OMCI_CC.REQUEST_FRAME].fields['transaction_id']
            if rx_tid == tx_tid:
                # Remove this request. Next callback in chain initiates next Tx
                self._tx_request[index] = None
            else:
                self._rx_late += 1
    self._send_next_request(high_priority)
    # Return rx_frame (to next item in callback list)
    return rx_frame
def _request_timeout(self, tx_tid, high_priority):
    """
    Tx Request timed out. Resend immediately if the retry count is non-zero.  A
    separate deferred (dc) is used on each actual Tx which is not the deferred
    (d) that is returned to the caller of the 'send()' method.

    If the timeout of the transmitted frame was zero, this is just cleanup of
    that transmit request and not necessarily a transmit timeout

    :param tx_tid: (int) TID of frame
    :param high_priority: (bool) True if high-priority queue
    """
    self.log.debug("_request_timeout", tx_tid=tx_tid)
    index = self._get_priority_index(high_priority)
    if self._tx_request[index] is not None:
        # (0: timestamp, 1: defer, 2: frame, 3: timeout, 4: retry, 5: delayedCall)
        ts, d, frame, timeout, retry, _dc = self._tx_request[index]
        if frame.fields.get('transaction_id', 0) == tx_tid:
            self._tx_request[index] = None
            # timeout == 0 means no response was expected; only count real timeouts
            if timeout > 0:
                self._rx_timeouts += 1
                if retry > 0:
                    # Push on front of TX pending queue so that it transmits next with the
                    # original TID
                    self._queue_frame(d, frame, timeout, retry - 1, high_priority, front=True)
                elif not d.called:
                    d.errback(failure.Failure(TimeoutError(timeout, "Send OMCI TID -{}".format(tx_tid))))
        else:
            self.log.warn('timeout-but-not-the-tx-frame')  # Statement mainly for debugging
    self._send_next_request(high_priority)
def _queue_frame(self, d, frame, timeout, retry, high_priority, front=False):
index = self._get_priority_index(high_priority)
tx_tuple = (d, frame, timeout, retry) # Pending -> (deferred, tx_frame, timeout, retry)
if front:
self._pending[index].insert(0, tuple)
else:
self._pending[index].append(tx_tuple)
# Monitor queue stats
qlen = len(self._pending[index])
if high_priority:
if self._max_hp_tx_queue < qlen:
self._max_hp_tx_queue = qlen
elif self._max_lp_tx_queue < qlen:
self._max_lp_tx_queue = qlen
self.log.debug("queue-size", index=index, pending_qlen=qlen)
    def send(self, frame, timeout=DEFAULT_OMCI_TIMEOUT, retry=0, high_priority=False):
        """
        Queue the OMCI Frame for a transmit to the ONU via the proxy_channel

        :param frame: (OMCIFrame) Message to send
        :param timeout: (int) Rx Timeout. 0=No response needed
        :param retry: (int) Additional retry attempts on channel failure, default=0
        :param high_priority: (bool) High Priority requests
        :return: (deferred) A deferred that fires when the response frame is received
                 or if an error/timeout occurs
        """
        if not self.enabled or self._proxy_address is None:
            # TODO custom exceptions throughout this code would be helpful
            self._tx_errors += 1
            return fail(result=failure.Failure(Exception('OMCI is not enabled')))

        timeout = float(timeout)
        if timeout > float(MAX_OMCI_REQUEST_AGE):
            self._tx_errors += 1
            msg = 'Maximum timeout is {} seconds'.format(MAX_OMCI_REQUEST_AGE)
            return fail(result=failure.Failure(Exception(msg)))

        if not isinstance(frame, OmciFrame):
            self._tx_errors += 1
            msg = "Invalid frame class '{}'".format(type(frame))
            return fail(result=failure.Failure(Exception(msg)))
        try:
            index = self._get_priority_index(high_priority)

            # Allocate a TID on behalf of the caller if the frame has none yet
            tx_tid = frame.fields['transaction_id']
            if tx_tid is None:
                tx_tid = self._get_tx_tid(high_priority=high_priority)
                frame.fields['transaction_id'] = tx_tid

            # NOTE(review): self._pending[index] holds (deferred, frame, timeout,
            # retry) tuples, so membership of a bare TID can never be true and
            # this assert cannot fire -- confirm the intended duplicate-TID check.
            assert tx_tid not in self._pending[index], 'TX TID {} is already exists'.format(tx_tid)
            assert tx_tid > 0, 'Invalid Tx TID: {}'.format(tx_tid)

            # Queue it and request next Tx if tx channel is free
            d = defer.Deferred()

            self._queue_frame(d, frame, timeout, retry, high_priority, front=False)
            self._send_next_request(high_priority)

            if timeout == 0:
                # Fire-and-forget: report success as soon as the frame is queued
                self.log.debug("send-timeout-zero", tx_tid=tx_tid)
                self.reactor.callLater(0, d.callback, 'queued')

            return d

        except Exception as e:
            self._tx_errors += 1
            self._consecutive_errors += 1

            # First consecutive error triggers a connectivity-lost event
            if self._consecutive_errors == 1:
                self.reactor.callLater(0, self._publish_connectivity_event, False)

            self.log.exception('send-omci', e=e)
            return fail(result=failure.Failure(e))
def _ok_to_send(self, tx_request, high_priority):
"""
G.988 specifies not to issue a MIB upload or a Software download request
when a similar action is in progress on the other channel. To keep the
logic here simple, a new upload/download will not be allowed if either a
upload/download is going on
:param tx_request (OmciFrame) Frame to send
:param high_priority: (bool) for queue selection
:return: True if okay to dequeue and send frame
"""
other = self._get_priority_index(not high_priority)
if self._tx_request[other] is None:
return True
this_msg_type = tx_request.fields['message_type'] & 0x1f
not_allowed = {OP.MibUpload.value,
OP.MibUploadNext.value,
OP.StartSoftwareDownload.value,
OP.DownloadSection.value,
OP.EndSoftwareDownload.value}
if this_msg_type not in not_allowed:
return True
other_msg_type = self._tx_request[other][OMCI_CC.REQUEST_FRAME].fields['message_type'] & 0x1f
return other_msg_type not in not_allowed
    def _send_next_request(self, high_priority):
        """
        Pull next tx request and send it

        :param high_priority: (bool) True if this was a high priority request
        :return: results, so callback chain continues if needed
        """
        index = self._get_priority_index(high_priority)

        if self._tx_request[index] is None:  # TODO or self._tx_request[index][OMCI_CC.REQUEST_DEFERRED].called:
            d = None
            try:
                # Hold off if the other priority channel is busy with a
                # conflicting upload/download action (G.988) and poll again shortly
                if len(self._pending[index]) and \
                        not self._ok_to_send(self._pending[index][0][OMCI_CC.PENDING_FRAME],
                                             high_priority):
                    reactor.callLater(0.05, self._send_next_request, high_priority)
                    return

                # Raises IndexError when the queue is empty (handled below)
                next_frame = self._pending[index].pop(0)

                d = next_frame[OMCI_CC.PENDING_DEFERRED]
                frame = next_frame[OMCI_CC.PENDING_FRAME]
                timeout = next_frame[OMCI_CC.PENDING_TIMEOUT]
                retry = next_frame[OMCI_CC.PENDING_RETRY]

                tx_tid = frame.fields['transaction_id']

                # NOTE: Since we may need to do an independent ME map on a per-ONU basis
                #       save the current value of the entity_id_to_class_map, then
                #       replace it with our custom one before decode, and then finally
                #       restore it later. Tried other ways but really made the code messy.
                saved_me_map = omci_entities.entity_id_to_class_map
                omci_entities.entity_id_to_class_map = self._me_map

                ts = arrow.utcnow().float_timestamp
                try:
                    self._rx_response[index] = None
                    self._adapter_agent.send_proxied_message(self._proxy_address,
                                                             hexify(str(frame)))
                finally:
                    # Always restore the global ME map, even if the send raised
                    omci_entities.entity_id_to_class_map = saved_me_map

                self._tx_frames += 1

                # Note: the 'd' deferred in the queued request we just got will
                # already have its success callback queued (callLater -> 0) with a
                # result of "queued". Here we need time it out internally so
                # we can call cleanup appropriately. G.988 mentions that most ONUs
                # will process an request in < 1 second.
                dc_timeout = timeout if timeout > 0 else 1.0

                # Timeout on internal deferred to support internal retries if requested
                dc = self.reactor.callLater(dc_timeout, self._request_timeout, tx_tid, high_priority)

                # Outstanding request: (timestamp, defer, frame, timeout, retry, delayedCall)
                self._tx_request[index] = (ts, d, frame, timeout, retry, dc)

                if timeout > 0:
                    d.addCallbacks(self._request_success, self._request_failure,
                                   callbackArgs=(high_priority,),
                                   errbackArgs=(tx_tid, high_priority))

            except IndexError:
                pass    # Nothing pending in this queue

            except Exception as e:
                self.log.exception('send-proxy-exception', e=e)
                self._tx_request[index] = None
                # Try the next queued request on a fresh reactor turn
                self.reactor.callLater(0, self._send_next_request, high_priority)

                if d is not None:
                    d.errback(failure.Failure(e))
        else:
            self.log.debug("tx-request-occupied", index=index)
###################################################################################
# MIB Action shortcuts
def send_mib_reset(self, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
"""
Perform a MIB Reset
"""
self.log.debug('send-mib-reset')
frame = OntDataFrame().mib_reset()
return self.send(frame, timeout=timeout, high_priority=high_priority)
def send_mib_upload(self, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
self.log.debug('send-mib-upload')
frame = OntDataFrame().mib_upload()
return self.send(frame, timeout=timeout, high_priority=high_priority)
def send_mib_upload_next(self, seq_no, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
self.log.debug('send-mib-upload-next')
frame = OntDataFrame(sequence_number=seq_no).mib_upload_next()
return self.send(frame, timeout=timeout, high_priority=high_priority)
def send_reboot(self, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
"""
Send an ONU Device reboot request (ONU-G ME).
NOTICE: This method is being deprecated and replaced with a tasks to preform this function
"""
self.log.debug('send-mib-reboot')
frame = OntGFrame().reboot()
return self.send(frame, timeout=timeout, high_priority=high_priority)
def send_get_all_alarm(self, alarm_retrieval_mode=0, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
self.log.debug('send_get_alarm')
frame = OntDataFrame().get_all_alarm(alarm_retrieval_mode)
return self.send(frame, timeout=timeout, high_priority=high_priority)
def send_get_all_alarm_next(self, seq_no, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
self.log.debug('send_get_alarm_next')
frame = OntDataFrame().get_all_alarm_next(seq_no)
return self.send(frame, timeout=timeout, high_priority=high_priority)
def send_start_software_download(self, image_inst_id, image_size, window_size, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
frame = SoftwareImageFrame(image_inst_id).start_software_download(image_size, window_size-1)
return self.send(frame, timeout, 3, high_priority=high_priority)
def send_download_section(self, image_inst_id, section_num, data, size=DEFAULT_OMCI_DOWNLOAD_SECTION_SIZE, timeout=0, high_priority=False):
"""
# timeout=0 indicates no repons needed
"""
# self.log.debug("send_download_section", instance_id=image_inst_id, section=section_num, timeout=timeout)
if timeout > 0:
frame = SoftwareImageFrame(image_inst_id).download_section(True, section_num, data)
else:
frame = SoftwareImageFrame(image_inst_id).download_section(False, section_num, data)
return self.send(frame, timeout, high_priority=high_priority)
# if timeout > 0:
# self.reactor.callLater(0, self.sim_receive_download_section_resp,
# frame.fields["transaction_id"],
# frame.fields["omci_message"].fields["section_number"])
# return d
def send_end_software_download(self, image_inst_id, crc32, image_size, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
frame = SoftwareImageFrame(image_inst_id).end_software_download(crc32, image_size)
return self.send(frame, timeout, high_priority=high_priority)
# self.reactor.callLater(0, self.sim_receive_end_software_download_resp, frame.fields["transaction_id"])
# return d
def send_active_image(self, image_inst_id, flag=0, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
frame = SoftwareImageFrame(image_inst_id).activate_image(flag)
return self.send(frame, timeout, high_priority=high_priority)
def send_commit_image(self, image_inst_id, timeout=DEFAULT_OMCI_TIMEOUT, high_priority=False):
frame = SoftwareImageFrame(image_inst_id).commit_image()
return self.send(frame, timeout, high_priority=high_priority)
| |
import subprocess as sp
import warnings
import numpy as np
from moviepy.video.io.ffmpeg_reader import ffmpeg_parse_infos
from moviepy.config import get_setting
from moviepy.compat import PY3, DEVNULL
import os
class FFMPEG_AudioReader:
    """
    A class to read the audio in either video files or audio files
    using ffmpeg. ffmpeg will read any audio and transform them into
    raw data.

    Parameters
    ------------

    filename
      Name of any video or audio file, like ``video.mp4`` or
      ``sound.wav`` etc.

    buffersize
      The size of the buffer to use. Should be bigger than the buffer
      used by ``to_audiofile``

    print_infos
      Print the ffmpeg infos on the file being read (for debugging)

    fps
      Desired frames per second in the decoded signal that will be
      received from ffmpeg

    nbytes
      Desired number of bytes (1,2,4) in the signal that will be
      received from ffmpeg

    nchannels
      Number of audio channels to request from ffmpeg (2 = stereo)
    """

    def __init__(self, filename, buffersize, print_infos=False,
                 fps=44100, nbytes=2, nchannels=2):
        self.filename = filename
        self.nbytes = nbytes
        self.fps = fps
        # Raw little-endian signed PCM, e.g. 's16le' when nbytes == 2
        self.f = 's%dle' % (8 * nbytes)
        self.acodec = 'pcm_s%dle' % (8 * nbytes)
        self.nchannels = nchannels
        infos = ffmpeg_parse_infos(filename)
        # Prefer the video stream's duration when available; the container
        # duration can differ from the actual stream length.
        if 'video_duration' in infos:
            self.duration = infos['video_duration']
        else:
            self.duration = infos['duration']
        self.infos = infos
        self.proc = None

        self.nframes = int(self.fps * self.duration)
        self.buffersize = min(self.nframes + 1, buffersize)
        self.buffer = None
        self.buffer_startframe = 1
        self.initialize()
        self.buffer_around(1)

    def initialize(self, starttime=0):
        """ Opens the file, creates the pipe. """
        self.close_proc()  # if any

        if starttime != 0:
            # Fast input-side seek (-ss before -i) to near the target, then an
            # accurate output-side seek over the final `offset` seconds.
            offset = min(1, starttime)
            i_arg = ["-ss", "%.05f" % (starttime - offset),
                     '-i', self.filename, '-vn',
                     "-ss", "%.05f" % offset]
        else:
            i_arg = ['-i', self.filename, '-vn']

        cmd = ([get_setting("FFMPEG_BINARY")] + i_arg +
               ['-loglevel', 'error',
                '-f', self.f,
                '-acodec', self.acodec,
                '-ar', "%d" % self.fps,
                '-ac', '%d' % self.nchannels, '-'])

        popen_params = {"bufsize": self.buffersize,
                        "stdout": sp.PIPE,
                        "stderr": sp.PIPE,
                        "stdin": DEVNULL}

        if os.name == "nt":
            # CREATE_NO_WINDOW: prevents a console window flashing on Windows
            popen_params["creationflags"] = 0x08000000

        self.proc = sp.Popen(cmd, **popen_params)
        self.pos = np.round(self.fps * starttime)

    def skip_chunk(self, chunksize):
        """ Reads and discards `chunksize` frames from the pipe. """
        s = self.proc.stdout.read(self.nchannels * chunksize * self.nbytes)
        self.proc.stdout.flush()
        self.pos = self.pos + chunksize

    def read_chunk(self, chunksize):
        """ Reads `chunksize` frames from the pipe and returns them as a float
        array of shape (n, nchannels), scaled to [-1, 1]. """
        # chunksize is not being autoconverted from float to int
        chunksize = int(round(chunksize))
        L = self.nchannels * chunksize * self.nbytes
        s = self.proc.stdout.read(L)
        dt = {1: 'int8', 2: 'int16', 4: 'int32'}[self.nbytes]
        # np.frombuffer replaces the deprecated np.fromstring (identical result
        # for raw binary input).
        result = np.frombuffer(s, dtype=dt)
        result = (1.0 * result / 2 ** (8 * self.nbytes - 1)).\
            reshape((int(len(result) / self.nchannels),
                     self.nchannels))
        self.pos = self.pos + chunksize
        return result

    def seek(self, pos):
        """
        Reads a frame at time t. Note for coders: getting an arbitrary
        frame in the video with ffmpeg can be painfully slow if some
        decoding has to be done. This function tries to avoid fetching
        arbitrary frames whenever possible, by moving between adjacent
        frames.
        """
        if (pos < self.pos) or (pos > (self.pos + 1000000)):
            # Backwards or far forward: restart ffmpeg at the target time
            t = 1.0 * pos / self.fps
            self.initialize(t)
        elif pos > self.pos:
            # Short forward seek: discard the intermediate frames
            self.skip_chunk(pos - self.pos)
        # last case standing: pos = current pos
        self.pos = pos

    def close_proc(self):
        """ Terminates the ffmpeg subprocess and closes its pipes. """
        if hasattr(self, 'proc') and self.proc is not None:
            self.proc.terminate()
            for std in [self.proc.stdout,
                        self.proc.stderr]:
                std.close()
            self.proc = None

    def get_frame(self, tt):
        """ Returns the audio frame(s) at time(s) `tt` (a scalar or an ndarray
        of seconds), reading through the internal rolling buffer. """
        if isinstance(tt, np.ndarray):
            # lazy implementation, but should not cause problems in
            # 99.99 % of the cases

            # elements of t that are actually in the range of the
            # audio file.
            in_time = (tt >= 0) & (tt < self.duration)

            # Check that the requested time is in the valid range
            if not in_time.any():
                raise IOError("Error in file %s, " % (self.filename) +
                              "Accessing time t=%.02f-%.02f seconds, " % (tt[0], tt[-1]) +
                              "with clip duration=%d seconds, " % self.duration)

            # The np.round in the next line is super-important.
            # Removing it results in artifacts in the noise.
            frames = np.round((self.fps * tt)).astype(int)[in_time]
            fr_min, fr_max = frames.min(), frames.max()

            # Recenter the buffer if either end of the request falls outside it
            if not (0 <=
                    (fr_min - self.buffer_startframe)
                    < len(self.buffer)):
                self.buffer_around(fr_min)
            elif not (0 <=
                      (fr_max - self.buffer_startframe)
                      < len(self.buffer)):
                self.buffer_around(fr_max)

            try:
                result = np.zeros((len(tt), self.nchannels))
                indices = frames - self.buffer_startframe
                if len(self.buffer) < self.buffersize // 2:
                    indices = indices - (self.buffersize // 2 - len(self.buffer) + 1)
                result[in_time] = self.buffer[indices]
                return result

            except IndexError as error:
                warnings.warn("Error in file %s, " % (self.filename) +
                              "At time t=%.02f-%.02f seconds, " % (tt[0], tt[-1]) +
                              "indices wanted: %d-%d, " % (indices.min(), indices.max()) +
                              "but len(buffer)=%d\n" % (len(self.buffer)) + str(error),
                              UserWarning)

                # repeat the last frame instead
                indices[indices >= len(self.buffer)] = len(self.buffer) - 1
                result[in_time] = self.buffer[indices]
                return result

        else:
            ind = int(self.fps * tt)
            if ind < 0 or ind > self.nframes:  # out of time: return 0
                return np.zeros(self.nchannels)

            if not (0 <= (ind - self.buffer_startframe) < len(self.buffer)):
                # out of the buffer: recenter the buffer
                self.buffer_around(ind)

            # read the frame in the buffer
            return self.buffer[ind - self.buffer_startframe]

    def buffer_around(self, framenumber):
        """
        Fills the buffer with frames, centered on ``framenumber``
        if possible
        """
        # start-frame for the buffer
        new_bufferstart = max(0, framenumber - self.buffersize // 2)

        if (self.buffer is not None):
            current_f_end = self.buffer_startframe + self.buffersize
            if (new_bufferstart <
                    current_f_end <
                    new_bufferstart + self.buffersize):
                # We already have one bit of what must be read
                conserved = current_f_end - new_bufferstart + 1
                chunksize = self.buffersize - conserved
                array = self.read_chunk(chunksize)
                self.buffer = np.vstack([self.buffer[-conserved:], array])
            else:
                self.seek(new_bufferstart)
                self.buffer = self.read_chunk(self.buffersize)
        else:
            self.seek(new_bufferstart)
            self.buffer = self.read_chunk(self.buffersize)

        self.buffer_startframe = new_bufferstart

    def __del__(self):
        # If the garbage collector comes, make sure the subprocess is terminated.
        self.close_proc()
| |
from __future__ import unicode_literals
import os
from sprinter import lib
# Sentinel marking "no value provided" (distinct from a legitimate None value).
EMPTY = object()

# Maps each boolean output style to its textual true/false representations.
bool_to_str = {
    'bool': { True: 'true', False: 'false' },
    't_f': { True: 't', False: 'f' },
    'y_n': { True: 'y', False: 'n' },
    'yes_no': { True: 'yes', False: 'no' }
}
class InputException(Exception):
    """ Raised for unknown input keys or malformed input specifications. """
    pass
class Input(object):
    """ Struct holding a single user input: its value, default, and
    prompting/formatting metadata. """

    # EMPTY means "never provided"; None can be a legitimate value.
    value = EMPTY
    default = EMPTY
    is_secret = False   # secret values are prompted without echo and not written out
    prompt = None       # optional custom prompt text
    in_type = None      # interpretation: 'bool', 't_f', 'y_n', 'yes_no', 'file', 'path', ...
    out_type = None     # output formatting style; defaults to in_type

    def is_empty(self, with_defaults=True):
        """ True if no value is set (and, optionally, no default either). """
        return self.value is EMPTY and (not with_defaults or self.default is EMPTY)

    def is_bool(self):
        """ True if this input is interpreted as a boolean. """
        return (self.in_type == 'bool' or
                self.in_type == 't_f' or
                self.in_type == 'y_n' or
                self.in_type == 'yes_no')

    def __str__(self):
        """ Return the string value, defaulting to default values """
        str_value = ''
        if self.value is not EMPTY and self.value is not None:
            str_value = self.value
        elif self.default is not EMPTY:
            str_value = self.default
        if self.in_type == 'file' or self.in_type == 'path':
            return os.path.expanduser(str_value)
        elif self.is_bool():
            # Bug fix: in_type 't_f' previously left bool_value as None,
            # raising a KeyError in the bool_to_str lookup below. Route it
            # through is_affirmative like 'bool'/'y_n' (assumes is_affirmative
            # accepts 't'/'true' style answers -- TODO confirm in sprinter.lib).
            if self.in_type == 'yes_no':
                bool_value = str_value.lower() == 'yes'
            else:
                bool_value = lib.is_affirmative(str_value)
            out_type = self.in_type if self.out_type is None else self.out_type
            return bool_to_str[out_type][bool_value]
        else:
            return str_value

    def __eq__(self, other):
        """ Equality on value, default, secrecy and prompt (help/type ignored). """
        for val in ('value', 'default', 'is_secret', 'prompt'):
            a = getattr(self, val, None)
            b = getattr(other, val, None)
            if not (a == b or a is b):
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    __repr__ = __str__
class Inputs(object):
    """ A class to hold, prompt for, and serialize user input values. """

    def __init__(self):
        # Mapping of input key -> Input instance
        self._inputs = {}

    def add_input(self, key, input_instance=None):
        """ Add an input <input> with a possible <value>, and <is_secret>"""
        self._inputs[key] = input_instance or Input()

    def is_input(self, key):
        """ Returns true if <key> is a key """
        return key in self._inputs

    def is_set(self, key):
        """ Returns True if <key> has an explicit (non-default) value set. """
        if key not in self._inputs:
            raise InputException("Key {0} is not a valid input!".format(key))
        return self._inputs[key].value is not EMPTY

    def set_input(self, key, value):
        """ Sets the <key> to <value> """
        if key not in self._inputs:
            raise InputException("Key {0} is not a valid input!".format(key))
        self._inputs[key].value = value

    def get_input(self, key, force=False):
        """ Get the value of <key> if it already exists, or prompt for it if not """
        if key not in self._inputs:
            raise InputException("Key {0} is not a valid input!".format(key))
        # Choose the prompt text: explicit prompt, a yes/no question for
        # booleans, or a generic "please enter" message.
        if self._inputs[key].prompt:
            prompt = self._inputs[key].prompt
        elif self._inputs[key].is_bool():
            prompt = "{0}?".format(key)
        else:
            prompt = "please enter your {0}".format(key)
        help_text = self._inputs[key].help if hasattr(self._inputs[key], 'help') else None
        if self._inputs[key].value is EMPTY or force:
            # An existing value (when force=True) takes precedence over the
            # declared default as the prompt's suggested answer.
            default_value = None
            if self._inputs[key].default is not EMPTY:
                default_value = self._inputs[key].default
            if self._inputs[key].value is not EMPTY:
                default_value = self._inputs[key].value
            input_value = EMPTY
            # Re-prompt while nothing was entered; answering '?' prints the
            # help text (when available) and prompts again.
            while input_value is EMPTY or input_value == '?':
                if input_value == '?' and help_text:
                    print(help_text)
                input_value = lib.prompt(
                    prompt,
                    default=default_value,
                    bool_type=self._inputs[key].in_type,
                    secret=self._inputs[key].is_secret)
            self._inputs[key].value = input_value
        return self._inputs[key].value

    def get_unset_inputs(self):
        """ Return a set of unset inputs """
        return set([k for k, v in self._inputs.items() if v.is_empty(False)])

    def prompt_unset_inputs(self, force=False):
        """ Prompt for unset input values """
        for k, v in self._inputs.items():
            if force or v.is_empty(False):
                self.get_input(k, force=force)

    def keys(self):
        """ Return a set of valid keys """
        return self._inputs.keys()

    def values(self, with_defaults=True):
        """ Return the values dictionary, defaulting to default values """
        return dict(((k, str(v)) for k, v in self._inputs.items() if not v.is_empty(with_defaults)))

    def write_values(self):
        """ Return the dictionary with which to write values """
        # Secrets are deliberately excluded from persisted output.
        return dict(((k, v.value) for k, v in self._inputs.items() if not v.is_secret and not v.is_empty(False)))

    def add_inputs_from_inputstring(self, input_string):
        """
        Add inputs using the input string format:

        gitroot==~/workspace
        username
        password?
        main_branch==comp_main
        """
        raw_params = input_string.split('\n')
        param_attributes = (self._parse_param_line(rp) for rp in raw_params if len(rp.strip(' \t')) > 0)
        for param, attributes in param_attributes:
            self.add_input(param, attributes)

    def _parse_param_line(self, line):
        """ Parse a single param line. """
        value = line.strip('\n \t')
        if len(value) > 0:
            i = Input()
            if value.find('#') != -1:
                value, extra_attributes = value.split('#')
                try:
                    # SECURITY NOTE: eval() executes arbitrary code from the
                    # input string -- only feed this trusted configuration.
                    extra_attributes = eval(extra_attributes)
                except SyntaxError:
                    raise InputException("Incorrectly formatted input for {0}!".format(value))
                if not isinstance(extra_attributes, dict):
                    raise InputException("Incorrectly formatted input for {0}!".format(value))
                if 'prompt' in extra_attributes:
                    i.prompt = extra_attributes['prompt']
                if 'help' in extra_attributes:
                    i.help = extra_attributes['help']
                if 'type' in extra_attributes:
                    i.in_type = extra_attributes['type']
                    # 'type/cast' syntax splits into input and output types
                    if i.in_type.find('/') != -1:
                        i.in_type, i.out_type = i.in_type.split('/')
                if 'cast' in extra_attributes:
                    i.out_type = extra_attributes['cast']
            if value.find('==') != -1:
                value, default = value.split('==')
                i.default = default
            if value.endswith('?'):
                # Trailing '?' marks the input as secret
                value = value[:-1]
                i.is_secret = True
            return (value, i)
        return None
| |
##### PATTERN | VECTOR | PORTER STEMMER ##################################
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
##########################################################################
# The Porter2 stemming algorithm (or "Porter stemmer") is a process for removing the commoner
# morphological and inflexional endings from words in English.
# Its main use is as part of a term normalisation process that is usually done
# when setting up Information Retrieval systems.
# Reference:
# C.J. van Rijsbergen, S.E. Robertson and M.F. Porter, 1980.
# "New models in probabilistic information retrieval."
# London: British Library. (British Library Research and Development Report, no. 5587).
#
# http://tartarus.org/~martin/PorterStemmer/
# Comments throughout the source code were taken from:
# http://snowball.tartarus.org/algorithms/english/stemmer.html
import re
#-------------------------------------------------------------------------
# Note: this module is optimized for performance.
# There is little gain in using more regular expressions.
# Character classes used by the stemmer. "y" is provisionally a vowel;
# upper_consonant_y() rewrites consonant-y occurrences as "Y".
VOWELS = ["a", "e", "i", "o", "u", "y"]
DOUBLE = ["bb", "dd", "ff", "gg", "mm", "nn", "pp", "rr", "tt"]
VALID_LI = ["b", "c", "d", "e", "g", "h", "k", "m", "n", "r", "t"]
def is_vowel(s):
    """Return True if s is a vowel character (a, e, i, o, u or y)."""
    # Constant-tuple membership: self-contained and avoids a global lookup
    # of the mutable VOWELS list; behavior is unchanged.
    return s in ("a", "e", "i", "o", "u", "y")
def is_consonant(s):
    """Return True if s is not a vowel character (a, e, i, o, u, y)."""
    # Constant-tuple membership instead of a global list lookup; same result.
    return s not in ("a", "e", "i", "o", "u", "y")
def is_double_consonant(s):
    """Return True if s is a doubled consonant pair (bb, dd, ff, ...)."""
    # Constant-tuple membership instead of a global list lookup; same result.
    return s in ("bb", "dd", "ff", "gg", "mm", "nn", "pp", "rr", "tt")
def is_short_syllable(w, before=None):
    """ A short syllable in a word is either:
        - a vowel followed by a non-vowel other than w, x or Y and preceded by a non-vowel
        - a vowel at the beginning of the word followed by a non-vowel.
        Checks the three characters before the given index in the word (or entire word if None).
    """
    if before is not None:
        # Idiom fix: a conditional expression instead of the error-prone
        # "cond and a or b" pattern (and "!= None"). Negative indices are
        # normalized to positions from the end; result is unchanged.
        i = len(w) + before if before < 0 else before
        return is_short_syllable(w[max(0, i - 3):i])
    vowels = "aeiouy"
    if len(w) == 3 and w[0] not in vowels and w[1] in vowels and \
            w[2] not in vowels and w[2] not in "wxY":
        return True
    if len(w) == 2 and w[0] in vowels and w[1] not in vowels:
        return True
    return False
def is_short(w):
    """A word is called short if it consists of a short syllable preceded by
    zero or more consonants."""
    if not is_short_syllable(w[-3:]):
        return False
    # No vowel may occur before the final three-character syllable.
    return not any(ch in VOWELS for ch in w[:-3])
# A point made at least twice in the literature is that words beginning with gener-
# are overstemmed by the Porter stemmer:
# generate => gener, generically => gener
# Moving the region one vowel-consonant pair to the right fixes this:
# generate => generat, generically => generic
# Prefixes that the classic algorithm overstems; R2() shifts their region
# one extra vowel-consonant pair to the right (see comments above).
overstemmed = ("gener", "commun", "arsen")

# First vowel followed by a non-vowel: locates the start of region R1.
RE_R1 = re.compile(r"[aeiouy][^aeiouy]")
def R1(w):
    """ R1 is the region after the first non-vowel following a vowel,
        or the end of the word if there is no such non-vowel.
    """
    match = RE_R1.search(w)
    return w[match.end():] if match else ""
def R2(w):
    """ R2 is the region after the first non-vowel following a vowel in R1,
        or the end of the word if there is no such non-vowel.
    """
    region = R1(R1(w))
    if w.startswith(overstemmed):
        # Shift one extra vowel-consonant pair right for gener-/commun-/arsen-.
        return R1(region)
    return region
def find_vowel(w):
    """Returns the index of the first vowel in the word.
    When no vowel is found, returns len(word).
    """
    # Literal vowel class makes the helper self-contained; identical result
    # to scanning against the module-level VOWELS list.
    return next((i for i, ch in enumerate(w) if ch in "aeiouy"), len(w))
def has_vowel(w):
    """Returns True if there is a vowel in the given string (y counts)."""
    # any() over a literal vowel class; self-contained and identical in result.
    return any(ch in "aeiouy" for ch in w)
def vowel_consonant_pairs(w, max=None):
    """ Returns the number of consecutive vowel-consonant pairs in the word.

        :param max: stop counting once this many pairs have been found
                    (optimisation; None counts all pairs).
    """
    m = 0
    # Inlined vowel class and pairwise zip replace the index loop and the
    # is_vowel/is_consonant helper calls; the count is unchanged.
    for a, b in zip(w, w[1:]):
        if a in "aeiouy" and b not in "aeiouy":
            m += 1
        # Stop searching once we reach the amount of <vc> pairs we need.
        if m == max:
            break
    return m
#--- REPLACEMENT RULES ---------------------------------------------------
def step_1a(w):
    """ Step 1a handles -s suffixes.
    """
    if not w.endswith("s"):
        return w
    if w.endswith("sses"):
        return w[:-2]
    if w.endswith("ies"):
        # Replace by -ie if preceded by just one letter,
        # otherwise by -i (so ties => tie, cries => cri).
        return w[:-1] if len(w) == 4 else w[:-2]
    if w.endswith(("us", "ss")):
        return w
    if find_vowel(w) < len(w) - 2:
        # Delete -s if the preceding part contains a vowel not immediately before the -s
        # (so gas and this retain the -s, gaps and kiwis lose it).
        return w[:-1]
    return w
def step_1b(w):
    """ Step 1b handles -ed and -ing suffixes (or -edly and -ingly).
        Removes double consonants at the end of the stem and adds -e to some words.
    """
    if w.endswith("y") and w.endswith(("edly", "ingly")):
        w = w[:-2]  # Strip -ly so the -ed/-ing rules below apply.
    if w.endswith(("ed", "ing")):
        if w.endswith("ied"):
            # See -ies in step 1a: -ie after one letter, otherwise -i.
            return len(w) == 4 and w[:-1] or w[:-2]
        if w.endswith("eed"):
            # Replace by -ee if preceded by at least one vowel-consonant pair.
            return R1(w).endswith("eed") and w[:-1] or w
        for suffix in ("ed", "ing"):
            # Delete if the preceding word part contains a vowel.
            # - If the word ends -at, -bl or -iz add -e (luxuriat => luxuriate).
            # - If the word ends with a double remove the last letter (hopp => hop).
            # - If the word is short, add e (hop => hope).
            if w.endswith(suffix) and has_vowel(w[:-len(suffix)]):
                w = w[:-len(suffix)]
                if w.endswith(("at", "bl", "iz")):
                    return w + "e"
                if is_double_consonant(w[-2:]):
                    return w[:-1]
                if is_short(w):
                    return w + "e"
                return w
    return w
def step_1c(w):
    """ Step 1c replaces suffix -y or -Y by -i if preceded by a non-vowel
        which is not the first letter of the word (cry => cri, by => by, say => say).
    """
    # Inline non-vowel test against a literal class (same as is_consonant).
    if len(w) > 2 and w.endswith(("y", "Y")) and w[-2] not in "aeiouy":
        return w[:-1] + "i"
    return w
# Step 2 double-suffix rules, bucketed by the word's final two letters:
# each entry is (last-two-chars, ((suffix, replacement), ...)).
suffixes2 = [
    ("al", (("ational", "ate"), ("tional", "tion"))),
    ("ci", (("enci", "ence"), ("anci", "ance"))),
    ("er", (("izer", "ize"),)),
    ("li", (("bli", "ble"), ("alli", "al"),
            ("entli", "ent"), ("eli", "e"), ("ousli", "ous"))),
    ("on", (("ization", "ize"), ("isation", "ize"), ("ation", "ate"))),
    ("or", (("ator", "ate"),)),
    ("ss", (("iveness", "ive"), ("fulness", "ful"), ("ousness", "ous"))),
    ("sm", (("alism", "al"),)),
    ("ti", (("aliti", "al"), ("iviti", "ive"), ("biliti", "ble"))),
    ("gi", (("logi", "log"),))
]
def step_2(w):
    """ Step 2 replaces double suffixes (singularization => singularize).
        This only happens if there is at least one vowel-consonant pair before the suffix.
    """
    region = R1(w)
    for suffix, rules in suffixes2:
        if not w.endswith(suffix):
            continue
        for A, B in rules:
            if w.endswith(A):
                # Replace only when the suffix lies inside R1.
                return w[:-len(A)] + B if region.endswith(A) else w
    if w.endswith("li") and region[-3:-2] in VALID_LI:
        # Delete -li if preceded by a valid li-ending.
        return w[:-2]
    return w
# Step 3 suffix rules, bucketed by the word's final letter;
# an empty replacement deletes the suffix entirely.
suffixes3 = [
    ("e", (("icate", "ic"), ("ative", ""), ("alize", "al"))),
    ("i", (("iciti", "ic"),)),
    ("l", (("ical", "ic"), ("ful", ""))),
    ("s", (("ness", ""),))
]
def step_3(w):
    """ Step 3 replaces -ic, -ful, -ness etc. suffixes.
        This only happens if there is at least one vowel-consonant pair before the suffix.
    """
    region = R1(w)
    for suffix, rules in suffixes3:
        if not w.endswith(suffix):
            continue
        for A, B in rules:
            if w.endswith(A):
                # Replace only when the suffix lies inside R1.
                return w[:-len(A)] + B if region.endswith(A) else w
    return w
# Step 4 suffixes to strip (no replacement), bucketed by final letter(s).
suffixes4 = [
    ("al", ("al",)),
    ("ce", ("ance", "ence")),
    ("er", ("er",)),
    ("ic", ("ic",)),
    ("le", ("able", "ible")),
    ("nt", ("ant", "ement", "ment", "ent")),
    ("e", ("ate", "ive", "ize")),
    (("m", "i", "s"), ("ism", "iti", "ous"))
]
def step_4(w):
    """ Step 4 strips -ant, -ent etc. suffixes.
        This only happens if there is more than one vowel-consonant pair before the suffix.
    """
    region = R2(w)
    for suffix, rules in suffixes4:
        if not w.endswith(suffix):
            continue
        for A in rules:
            if w.endswith(A):
                # Strip only when the suffix lies inside R2.
                return w[:-len(A)] if region.endswith(A) else w
    if region.endswith("ion") and w[:-3].endswith(("s", "t")):
        # Delete -ion if preceded by s or t.
        return w[:-3]
    return w
def step_5a(w):
    """ Step 5a strips suffix -e if preceded by multiple vowel-consonant pairs,
        or one vowel-consonant pair that is not a short syllable.
    """
    if not w.endswith("e"):
        return w
    # -e is removed when it lies in R2, or in R1 provided the part before
    # it is not a short syllable.
    if R2(w).endswith("e"):
        return w[:-1]
    if R1(w).endswith("e") and not is_short_syllable(w, before=-1):
        return w[:-1]
    return w
def step_5b(w):
    """ Step 5b strips suffix -l if preceded by l and multiple vowel-consonant pairs,
        bell => bell, rebell => rebel.
    """
    drop = w.endswith("ll") and R2(w).endswith("l")
    return w[:-1] if drop else w
#--- EXCEPTIONS ----------------------------------------------------------
# Exceptions:
# - in, out and can stems could be seen as stop words later on.
# - Special -ly cases.
# Irregular forms the rule steps would get wrong (dying => die) plus the
# special -ly cases mentioned above.
exceptions = {
    "skis": "ski",
    "skies": "sky",
    "dying": "die",
    "lying": "lie",
    "tying": "tie",
    "innings": "inning",
    "outings": "outing",
    "cannings": "canning",
    "idly": "idl",
    "gently": "gentl",
    "ugly": "ugli",
    "early": "earli",
    "only": "onli",
    "singly": "singl"
}

# Words that are never stemmed:
uninflected = dict.fromkeys([
    "sky",
    "news",
    "howe",
    "inning", "outing", "canning",
    "proceed", "exceed", "succeed",
    "atlas", "cosmos", "bias", "andes"  # not plural forms
], True)
#--- STEMMER -------------------------------------------------------------
def case_sensitive(stem, word):
    """ Applies the letter case of the word to the stem:
        Ponies => Poni
    """
    out = []
    for i, ch in enumerate(stem):
        # Uppercase the stem character wherever the original word is uppercase.
        out.append(ch.upper() if word[i] == word[i].upper() else ch)
    return "".join(out)
def upper_consonant_y(w):
    """Sets the initial y, or y after a vowel, to Y.
    Of course, y is interpreted as a vowel and Y as a consonant.
    """
    out = []
    prev = None
    # Inlined vowel class replaces the module-level VOWELS lookup; the
    # transformation is unchanged.
    for ch in w:
        if ch == "y" and (prev is None or prev in "aeiouy"):
            out.append("Y")
        else:
            out.append(ch)
        prev = ch
    return "".join(out)
# If we stemmed a word once, we can cache the result and reuse it.
# By default, keep a history of a 10000 entries (<500KB).
cache = {}
def stem(word, cached=True, history=10000, **kwargs):
    """ Returns the stem of the given word: ponies => poni.
        Note: it is often taken to be a crude error
        that a stemming algorithm does not leave a real word after removing the stem.
        But the purpose of stemming is to bring variant forms of a word together,
        not to map a word onto its "paradigm" form.

        :param cached: reuse (and populate) the module-level result cache.
        :param history: maximum number of cache entries before it is emptied.
    """
    stem = word.lower()
    if cached and stem in cache:
        # Cached stems are stored lowercase; re-apply the original casing.
        return case_sensitive(cache[stem], word)
    if cached and len(cache) > history:  # Empty cache every now and then.
        cache.clear()
    if len(stem) <= 2:
        # If the word has two letters or less, leave it as it is.
        return case_sensitive(stem, word)
    if stem in exceptions:
        # Irregular forms (dying => die) and the special -ly cases.
        return case_sensitive(exceptions[stem], word)
    if stem in uninflected:
        # Words that must never be stemmed (news, atlas, ...).
        return case_sensitive(stem, word)
    # Mark y treated as a consonant as Y.
    stem = upper_consonant_y(stem)
    # Apply the Porter2 replacement rules in order.
    for f in (step_1a, step_1b, step_1c, step_2, step_3, step_4, step_5a, step_5b):
        stem = f(stem)
    # Turn any remaining Y letters in the stem back into lower case.
    # Apply the case of the original word to the stem.
    stem = stem.lower()
    stem = case_sensitive(stem, word)
    if cached:
        cache[word.lower()] = stem.lower()
    return stem
| |
#!/usr/bin/env python
# define some variables
from __future__ import print_function
from builtins import input
import numpy
import sys
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import pmagpy.pmag as pmag
import pmagpy.pmagplotlib as pmagplotlib
def main():
    """
    NAME
        plot_map_pts.py
    DESCRIPTION
        plots points on map
    SYNTAX
        plot_map_pts.py [command line options]
    OPTIONS
        -h prints help and quits
        -sym [ro, bs, g^, r., b-, etc.] [1,5,10] symbol and size for points
            colors are r=red,b=blue,g=green, etc.
            symbols are '.' for points, ^, for triangle, s for square, etc.
            -, for lines, -- for dotted lines, see matplotlib online documentation for plot()
        -eye ELAT ELON [specify eyeball location]
        -etp put on topography
        -cmap color map [default is jet]
        -f FILE, specify input file
        -o color ocean blue/land green (default is not)
        -res [c,l,i,h] specify resolution (crude, low, intermediate, high]
        -fmt [pdf,eps, png] specify output format (default is pdf)
        -R don't plot details of rivers
        -B don't plot national/state boundaries, etc.
        -pad [LAT LON] pad bounding box by LAT/LON (default is not)
        -grd SPACE specify grid spacing
        -sav save plot and quit
        -prj PROJ, specify one of the supported projections:
            pc = Plate Carree
            aea = Albers Equal Area
            aeqd = Azimuthal Equidistant
            lcc = Lambert Conformal
            lcyl = Lambert Cylindrical
            merc = Mercator
            mill = Miller Cylindrical
            moll = Mollweide [default]
            ortho = Orthographic
            robin = Robinson
            sinu = Sinusoidal
            stere = Stereographic
            tmerc = Transverse Mercator
            utm = UTM
            laea = Lambert Azimuthal Equal Area
            geos = Geostationary
            npstere = North-Polar Stereographic
            spstere = South-Polar Stereographic
        Special codes for MagIC formatted input files:
            -n
            -l
    INPUTS
        space or tab delimited LON LAT data
        OR:
        standard MagIC formatted er_sites or pmag_results table
    DEFAULTS
        res:  c
        prj: mollweide;  lcc for MagIC format files
        ELAT,ELON = 0,0
        pad LAT,LON=0,0
        NB: high resolution or lines can be very slow
    """
    # --- defaults -------------------------------------------------------
    dir_path = '.'
    plot = 0                     # 1 => save and quit without interaction
    res = 'c'                    # map resolution
    proj = 'moll'                # default projection
    Lats, Lons = [], []
    fmt = 'pdf'
    sym = 'ro'
    symsize = 5
    fancy = 0                    # 1 => draw topography (-etp)
    rivers, boundaries, ocean = 1, 1, 0
    latmin, latmax, lonmin, lonmax = -90, 90, 0., 360.
    padlat, padlon, gridspace = 0, 0, 30
    lat_0, lon_0 = "", ""        # "" means: derive eyeball from data extent
    basemap = 1
    prn_name, prn_loc, names, locs = 0, 0, [], []
    # --- command-line parsing -------------------------------------------
    if '-WD' in sys.argv:
        ind = sys.argv.index('-WD')
        dir_path = sys.argv[ind+1]
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-fmt' in sys.argv:
        ind = sys.argv.index('-fmt')
        fmt = sys.argv[ind+1]
    if '-res' in sys.argv:
        ind = sys.argv.index('-res')
        res = sys.argv[ind+1]
        if res != 'c' and res != 'l':
            print('this resolution will take a while - be patient')
    if '-etp' in sys.argv:
        fancy = 1
        print('-W- plotting will require patience!')
    if '-ctp' in sys.argv:
        basemap = 0
    if '-sav' in sys.argv:
        plot = 1
    if '-R' in sys.argv:
        rivers = 0
    if '-B' in sys.argv:
        boundaries = 0
    if '-o' in sys.argv:
        ocean = 1
    if '-cmap' in sys.argv:
        ind = sys.argv.index('-cmap')
        # BUG FIX: a colormap is a name such as 'jet', not a number; the
        # original float() conversion raised ValueError for any valid input.
        cmap = sys.argv[ind+1]
    else:
        cmap = 'jet'
    if '-grd' in sys.argv:
        ind = sys.argv.index('-grd')
        gridspace = float(sys.argv[ind+1])
    if '-eye' in sys.argv:
        ind = sys.argv.index('-eye')
        lat_0 = float(sys.argv[ind+1])
        lon_0 = float(sys.argv[ind+2])
    if '-sym' in sys.argv:
        ind = sys.argv.index('-sym')
        sym = sys.argv[ind+1]
        symsize = int(sys.argv[ind+2])
    if '-pad' in sys.argv:
        ind = sys.argv.index('-pad')
        padlat = float(sys.argv[ind+1])
        padlon = float(sys.argv[ind+2])
    # --- read the input points ------------------------------------------
    if '-f' in sys.argv:
        ind = sys.argv.index('-f')
        file = dir_path + '/' + sys.argv[ind+1]
        header = open(file, 'r').readlines()[0].split('\t')
        if 'tab' in header[0]:
            # MagIC formatted table: pull lat/lon out of the records
            proj = 'lcc'
            if 'sites' in header[1]:
                latkey = 'lat'
                lonkey = 'lon'
                namekey = 'site'
                lockey = ''
            else:
                print('file type not supported')
                print(main.__doc__)
                sys.exit()
            Sites, file_type = pmag.magic_read(file)
            Lats = pmag.get_dictkey(Sites, latkey, 'f')
            Lons = pmag.get_dictkey(Sites, lonkey, 'f')
            # NOTE(review): both branches assign to ``names`` — presumably
            # the prn_loc case was meant to fill ``locs``; left as-is since
            # prn_name/prn_loc are never set in this script.
            if prn_name == 1:
                names = pmag.get_dictkey(Sites, namekey, '')
            if prn_loc == 1:
                names = pmag.get_dictkey(Sites, lockey, '')
        else:
            # plain space/tab delimited LON LAT columns
            ptdata = numpy.loadtxt(file)
            Lons = ptdata.transpose()[0]
            Lats = ptdata.transpose()[1]
            latmin = numpy.min(Lats) - padlat
            lonmin = numpy.min(Lons) - padlon
            latmax = numpy.max(Lats) + padlat
            lonmax = numpy.max(Lons) + padlon
            if lon_0 == "":
                # no -eye given: center the view on the data
                lon_0 = 0.5 * (lonmin + lonmax)
                lat_0 = 0.5 * (latmin + latmax)
    else:
        print("input file must be specified")
        sys.exit()
    if '-prj' in sys.argv:
        ind = sys.argv.index('-prj')
        proj = sys.argv[ind+1]
    # --- build the plot --------------------------------------------------
    FIG = {'map': 1}
    pmagplotlib.plot_init(FIG['map'], 6, 6)
    Opts = {'latmin': latmin, 'latmax': latmax,
            'lonmin': lonmin, 'lonmax': lonmax,
            'lat_0': lat_0, 'lon_0': lon_0,
            'proj': proj, 'sym': sym, 'symsize': 3,
            'pltgrid': 1, 'res': res, 'boundinglat': 0.,
            'padlon': padlon, 'padlat': padlat,
            'gridspace': gridspace, 'cmap': cmap}
    Opts['details'] = {'coasts': 1,
                       'rivers': rivers,
                       'states': boundaries,
                       'countries': boundaries,
                       'ocean': ocean,
                       'fancy': fancy}
    if len(names) > 0:
        Opts['names'] = names
    if len(locs) > 0:
        Opts['loc_name'] = locs
    if proj == 'merc':
        # Mercator diverges at the poles; clamp the bounding box
        Opts['latmin'] = -70
        Opts['latmax'] = 70
        Opts['lonmin'] = -180
        Opts['lonmax'] = 180
    print('please wait to draw points')
    Opts['sym'] = sym
    Opts['symsize'] = symsize
    # NOTE(review): the original branched on ``basemap`` here, but both
    # branches made the identical call, so a single call is equivalent.
    pmagplotlib.plot_map(FIG['map'], Lats, Lons, Opts)
    # --- output -----------------------------------------------------------
    titles = {'map': 'PT Map'}
    files = {key: 'map_pts' + '.' + fmt for key in FIG.keys()}
    if pmagplotlib.isServer:
        black = '#000000'
        purple = '#800080'
        FIG = pmagplotlib.add_borders(FIG, titles, black, purple)
        pmagplotlib.save_plots(FIG, files)
    if plot == 1:
        pmagplotlib.save_plots(FIG, files)
    else:
        pmagplotlib.draw_figs(FIG)
        ans = input(" S[a]ve to save plot, Return to quit:  ")
        if ans == "a":
            pmagplotlib.save_plots(FIG, files)
# Standard script entry point: run main() only when executed directly.
if __name__ == "__main__":
    main()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Normalize for domain_id, i.e. ensure User and Project entities have the
domain_id as a first class attribute.
Both User and Project (as well as Group) entities are owned by a
domain, which is implemented as each having a domain_id foreign key
in their sql representation that points back to the respective
domain in the domain table. This domain_id attribute should also
be required (i.e. not nullable)
Adding a non_nullable foreign key attribute to a table with existing
data causes a few problems since not all DB engines support the
ability to either control the triggering of integrity constraints
or the ability to modify columns after they are created.
To get round the above inconsistencies, two versions of the
upgrade/downgrade functions are supplied, one for those engines
that support dropping columns, and one for those that don't. For
the latter we are forced to do table copy AND control the triggering
of integrity constraints.
"""
import sqlalchemy as sql
from sqlalchemy.orm import sessionmaker
from keystone import config
CONF = config.CONF
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
def _disable_foreign_constraints(session, migrate_engine):
if migrate_engine.name == 'mysql':
session.execute('SET foreign_key_checks = 0;')
def _enable_foreign_constraints(session, migrate_engine):
if migrate_engine.name == 'mysql':
session.execute('SET foreign_key_checks = 1;')
def upgrade_user_table_with_copy(meta, migrate_engine, session):
    """Add a non-nullable ``domain_id`` column to ``user`` via table copy.

    For engines that cannot alter a populated table in place: copy rows to
    a temp table, drop and re-create ``user`` with the new column, then copy
    the rows back with domain_id set to DEFAULT_DOMAIN_ID.
    """
    # We want to add the domain_id attribute to the user table. Since
    # it is non nullable and the table may have data, easiest way is
    # a table copy. Further, in order to keep foreign key constraints
    # pointing at the right table, we need to be able and do a table
    # DROP then CREATE, rather than ALTERing the name of the table.
    # First make a copy of the user table
    temp_user_table = sql.Table(
        'temp_user',
        meta,
        sql.Column('id', sql.String(64), primary_key=True),
        sql.Column('name', sql.String(64), unique=True, nullable=False),
        sql.Column('extra', sql.Text()),
        sql.Column('password', sql.String(128)),
        sql.Column('enabled', sql.Boolean, default=True))
    temp_user_table.create(migrate_engine, checkfirst=True)
    user_table = sql.Table('user', meta, autoload=True)
    for user in session.query(user_table):
        session.execute('insert into temp_user (id, name, extra, '
                        'password, enabled) '
                        'values ( :id, :name, :extra, '
                        ':password, :enabled);',
                        {'id': user.id,
                         'name': user.name,
                         'extra': user.extra,
                         'password': user.password,
                         'enabled': user.enabled})
    # Now switch off constraints while we drop and then re-create the
    # user table, with the additional domain_id column
    _disable_foreign_constraints(session, migrate_engine)
    session.execute('drop table user;')
    # Need to create a new metadata stream since we are going to load a
    # different version of the user table
    meta2 = sql.MetaData()
    meta2.bind = migrate_engine
    # Loading 'domain' into meta2 lets the ForeignKey('domain.id') below
    # resolve its target table.
    domain_table = sql.Table('domain', meta2, autoload=True)
    user_table = sql.Table(
        'user',
        meta2,
        sql.Column('id', sql.String(64), primary_key=True),
        sql.Column('name', sql.String(64), nullable=False),
        sql.Column('extra', sql.Text()),
        sql.Column("password", sql.String(128)),
        sql.Column("enabled", sql.Boolean, default=True),
        sql.Column('domain_id', sql.String(64), sql.ForeignKey('domain.id'),
                   nullable=False),
        sql.UniqueConstraint('domain_id', 'name'))
    user_table.create(migrate_engine, checkfirst=True)
    # Finally copy in the data from our temp table and then clean
    # up by deleting our temp table.  Every copied row is placed in the
    # default domain.
    for user in session.query(temp_user_table):
        session.execute('insert into user (id, name, extra, '
                        'password, enabled, domain_id) '
                        'values ( :id, :name, :extra, '
                        ':password, :enabled, :domain_id);',
                        {'id': user.id,
                         'name': user.name,
                         'extra': user.extra,
                         'password': user.password,
                         'enabled': user.enabled,
                         'domain_id': DEFAULT_DOMAIN_ID})
    _enable_foreign_constraints(session, migrate_engine)
    session.execute('drop table temp_user;')
def upgrade_project_table_with_copy(meta, migrate_engine, session):
    """Add a non-nullable ``domain_id`` column to ``project`` via table copy.

    Same copy/drop/re-create/copy-back strategy as
    upgrade_user_table_with_copy; existing rows get DEFAULT_DOMAIN_ID.
    """
    # We want to add the domain_id attribute to the project table. Since
    # it is non nullable and the table may have data, easiest way is
    # a table copy. Further, in order to keep foreign key constraints
    # pointing at the right table, we need to be able and do a table
    # DROP then CREATE, rather than ALTERing the name of the table.
    # First make a copy of the project table
    temp_project_table = sql.Table(
        'temp_project',
        meta,
        sql.Column('id', sql.String(64), primary_key=True),
        sql.Column('name', sql.String(64), unique=True, nullable=False),
        sql.Column('extra', sql.Text()),
        sql.Column('description', sql.Text()),
        sql.Column('enabled', sql.Boolean, default=True))
    temp_project_table.create(migrate_engine, checkfirst=True)
    project_table = sql.Table('project', meta, autoload=True)
    for project in session.query(project_table):
        session.execute('insert into temp_project (id, name, extra, '
                        'description, enabled) '
                        'values ( :id, :name, :extra, '
                        ':description, :enabled);',
                        {'id': project.id,
                         'name': project.name,
                         'extra': project.extra,
                         'description': project.description,
                         'enabled': project.enabled})
    # Now switch off constraints while we drop and then re-create the
    # project table, with the additional domain_id column
    _disable_foreign_constraints(session, migrate_engine)
    session.execute('drop table project;')
    # Need to create a new metadata stream since we are going to load a
    # different version of the project table
    meta2 = sql.MetaData()
    meta2.bind = migrate_engine
    # Loading 'domain' into meta2 lets the ForeignKey('domain.id') below
    # resolve its target table.
    domain_table = sql.Table('domain', meta2, autoload=True)
    project_table = sql.Table(
        'project',
        meta2,
        sql.Column('id', sql.String(64), primary_key=True),
        sql.Column('name', sql.String(64), nullable=False),
        sql.Column('extra', sql.Text()),
        sql.Column('description', sql.Text()),
        sql.Column('enabled', sql.Boolean, default=True),
        sql.Column('domain_id', sql.String(64), sql.ForeignKey('domain.id'),
                   nullable=False),
        sql.UniqueConstraint('domain_id', 'name'))
    project_table.create(migrate_engine, checkfirst=True)
    # Finally copy in the data from our temp table and then clean
    # up by deleting our temp table.  Every copied row is placed in the
    # default domain.
    for project in session.query(temp_project_table):
        session.execute('insert into project (id, name, extra, '
                        'description, enabled, domain_id) '
                        'values ( :id, :name, :extra, '
                        ':description, :enabled, :domain_id);',
                        {'id': project.id,
                         'name': project.name,
                         'extra': project.extra,
                         'description': project.description,
                         'enabled': project.enabled,
                         'domain_id': DEFAULT_DOMAIN_ID})
    _enable_foreign_constraints(session, migrate_engine)
    session.execute('drop table temp_project;')
def downgrade_user_table_with_copy(meta, migrate_engine, session):
    """Remove the ``domain_id`` column from ``user`` via table copy.

    Used by engines that do not support dropping columns: copy rows out,
    drop and re-create ``user`` in its pre-migration shape, copy rows back.
    The domain_id values are discarded.
    """
    # For engines that don't support dropping columns, we need to do this
    # as a table copy. Further, in order to keep foreign key constraints
    # pointing at the right table, we need to be able and do a table
    # DROP then CREATE, rather than ALTERing the name of the table.
    # First make a copy of the user table
    temp_user_table = sql.Table(
        'temp_user',
        meta,
        sql.Column('id', sql.String(64), primary_key=True),
        sql.Column('name', sql.String(64), unique=True, nullable=False),
        sql.Column('password', sql.String(128)),
        sql.Column('enabled', sql.Boolean, default=True),
        sql.Column('extra', sql.Text()))
    temp_user_table.create(migrate_engine, checkfirst=True)
    user_table = sql.Table('user', meta, autoload=True)
    for user in session.query(user_table):
        session.execute('insert into temp_user (id, name, '
                        'password, enabled, extra) '
                        'values ( :id, :name, '
                        ':password, :enabled, :extra);',
                        {'id': user.id,
                         'name': user.name,
                         'password': user.password,
                         'enabled': user.enabled,
                         'extra': user.extra})
    # Now switch off constraints while we drop and then re-create the
    # user table, less the columns we wanted to drop
    _disable_foreign_constraints(session, migrate_engine)
    session.execute('drop table user;')
    # Need to create a new metadata stream since we are going to load a
    # different version of the user table
    meta2 = sql.MetaData()
    meta2.bind = migrate_engine
    user_table = sql.Table(
        'user',
        meta2,
        sql.Column('id', sql.String(64), primary_key=True),
        sql.Column('name', sql.String(64), unique=True, nullable=False),
        sql.Column('extra', sql.Text()),
        sql.Column('password', sql.String(128)),
        sql.Column('enabled', sql.Boolean, default=True))
    user_table.create(migrate_engine, checkfirst=True)
    # NOTE(review): unlike the upgrade path, constraints are re-enabled
    # *before* copying the data back -- presumably safe because the
    # re-created table has no foreign keys; confirm before reordering.
    _enable_foreign_constraints(session, migrate_engine)
    # Finally copy in the data from our temp table and then clean
    # up by deleting our temp table
    for user in session.query(temp_user_table):
        session.execute('insert into user (id, name, extra, '
                        'password, enabled) '
                        'values ( :id, :name, :extra, '
                        ':password, :enabled);',
                        {'id': user.id,
                         'name': user.name,
                         'extra': user.extra,
                         'password': user.password,
                         'enabled': user.enabled})
    session.execute('drop table temp_user;')
def downgrade_project_table_with_copy(meta, migrate_engine, session):
    """Remove the ``domain_id`` column from ``project`` via table copy.

    Mirror of downgrade_user_table_with_copy for the project table; the
    domain_id values are discarded.
    """
    # For engines that don't support dropping columns, we need to do this
    # as a table copy. Further, in order to keep foreign key constraints
    # pointing at the right table, we need to be able and do a table
    # DROP then CREATE, rather than ALTERing the name of the table.
    # First make a copy of the project table
    temp_project_table = sql.Table(
        'temp_project',
        meta,
        sql.Column('id', sql.String(64), primary_key=True),
        sql.Column('name', sql.String(64), unique=True, nullable=False),
        sql.Column('description', sql.Text()),
        sql.Column('enabled', sql.Boolean, default=True),
        sql.Column('extra', sql.Text()))
    temp_project_table.create(migrate_engine, checkfirst=True)
    project_table = sql.Table('project', meta, autoload=True)
    for project in session.query(project_table):
        session.execute('insert into temp_project (id, name, '
                        'description, enabled, extra) '
                        'values ( :id, :name, '
                        ':description, :enabled, :extra);',
                        {'id': project.id,
                         'name': project.name,
                         'description': project.description,
                         'enabled': project.enabled,
                         'extra': project.extra})
    # Now switch off constraints while we drop and then re-create the
    # project table, less the columns we wanted to drop
    _disable_foreign_constraints(session, migrate_engine)
    session.execute('drop table project;')
    # Need to create a new metadata stream since we are going to load a
    # different version of the project table
    meta2 = sql.MetaData()
    meta2.bind = migrate_engine
    project_table = sql.Table(
        'project',
        meta2,
        sql.Column('id', sql.String(64), primary_key=True),
        sql.Column('name', sql.String(64), unique=True, nullable=False),
        sql.Column('extra', sql.Text()),
        sql.Column('description', sql.Text()),
        sql.Column('enabled', sql.Boolean, default=True))
    project_table.create(migrate_engine, checkfirst=True)
    # NOTE(review): constraints re-enabled before the copy-back, matching
    # downgrade_user_table_with_copy -- confirm before reordering.
    _enable_foreign_constraints(session, migrate_engine)
    # Finally copy in the data from our temp table and then clean
    # up by deleting our temp table
    for project in session.query(temp_project_table):
        session.execute('insert into project (id, name, extra, '
                        'description, enabled) '
                        'values ( :id, :name, :extra, '
                        ':description, :enabled);',
                        {'id': project.id,
                         'name': project.name,
                         'extra': project.extra,
                         'description': project.description,
                         'enabled': project.enabled})
    session.execute("drop table temp_project;")
def upgrade_user_table_with_col_create(meta, migrate_engine, session):
    """Add a non-nullable ``domain_id`` column to ``user`` in place.

    For engines that support column alteration: add the column nullable,
    backfill DEFAULT_DOMAIN_ID, then tighten it to NOT NULL and replace the
    plain name-unique constraint with (domain_id, name).
    """
    # Create the domain_id column.  We want this to be not nullable
    # but also a foreign key.  We can't create this right off the
    # bat since any existing rows would cause an Integrity Error.
    # We therefore create it nullable, fill the column with the
    # default data and then set it to non nullable.
    # Loading 'domain' lets the ForeignKey('domain.id') below resolve.
    domain_table = sql.Table('domain', meta, autoload=True)
    user_table = sql.Table('user', meta, autoload=True)
    user_table.create_column(
        sql.Column('domain_id', sql.String(64),
                   sql.ForeignKey('domain.id'), nullable=True))
    # Backfill every existing row with the default domain.
    for user in session.query(user_table).all():
        values = {'domain_id': DEFAULT_DOMAIN_ID}
        update = user_table.update().\
            where(user_table.c.id == user.id).\
            values(values)
        migrate_engine.execute(update)
    # Need to commit this or setting nullable to False will fail
    session.commit()
    user_table.columns.domain_id.alter(nullable=False)
    # Finally, change the uniqueness settings for the name attribute.
    # "user" is quoted because it is a reserved word in some engines.
    session.execute('ALTER TABLE "user" DROP CONSTRAINT user_name_key;')
    session.execute('ALTER TABLE "user" ADD CONSTRAINT user_dom_name_unique '
                    'UNIQUE (domain_id, name);')
def upgrade_project_table_with_col_create(meta, migrate_engine, session):
    """Add a non-nullable ``domain_id`` column to ``project`` in place.

    Mirror of upgrade_user_table_with_col_create for the project table:
    add nullable, backfill DEFAULT_DOMAIN_ID, make NOT NULL, and swap the
    tenant_name_key unique constraint for (domain_id, name).
    """
    # Create the domain_id column.  We want this to be not nullable
    # but also a foreign key.  We can't create this right off the
    # bat since any existing rows would cause an Integrity Error.
    # We therefore create it nullable, fill the column with the
    # default data and then set it to non nullable.
    # Loading 'domain' lets the ForeignKey('domain.id') below resolve.
    domain_table = sql.Table('domain', meta, autoload=True)
    project_table = sql.Table('project', meta, autoload=True)
    project_table.create_column(
        sql.Column('domain_id', sql.String(64),
                   sql.ForeignKey('domain.id'), nullable=True))
    # Backfill every existing row with the default domain.
    for project in session.query(project_table).all():
        values = {'domain_id': DEFAULT_DOMAIN_ID}
        update = project_table.update().\
            where(project_table.c.id == project.id).\
            values(values)
        migrate_engine.execute(update)
    # Need to commit this or setting nullable to False will fail
    session.commit()
    project_table.columns.domain_id.alter(nullable=False)
    # Finally, change the uniqueness settings for the name attribute.
    # The old constraint is named tenant_name_key from before the
    # tenant->project rename.
    session.execute('ALTER TABLE project DROP CONSTRAINT tenant_name_key;')
    session.execute('ALTER TABLE project ADD CONSTRAINT proj_dom_name_unique '
                    'UNIQUE (domain_id, name);')
def downgrade_user_table_with_col_drop(meta, migrate_engine, session):
    """Drop the ``domain_id`` column from ``user`` in place.

    Restores the plain unique-name constraint and removes the column for
    engines that support dropping columns.
    """
    # Revert uniqueness settings for the name attribute
    session.execute('ALTER TABLE "user" DROP CONSTRAINT '
                    'user_dom_name_unique;')
    session.execute('ALTER TABLE "user" ADD UNIQUE (name);')
    session.commit()
    # And now go ahead and drop the domain_id column.
    # Loading 'domain' lets the ForeignKey('domain.id') below resolve.
    domain_table = sql.Table('domain', meta, autoload=True)
    user_table = sql.Table('user', meta, autoload=True)
    column = sql.Column('domain_id', sql.String(64),
                        sql.ForeignKey('domain.id'), nullable=False)
    column.drop(user_table)
def downgrade_project_table_with_col_drop(meta, migrate_engine, session):
    """Drop the ``domain_id`` column from ``project`` in place.

    Restores the original tenant_name_key unique constraint and removes
    the column for engines that support dropping columns.
    """
    # Revert uniqueness settings for the name attribute
    session.execute('ALTER TABLE project DROP CONSTRAINT '
                    'proj_dom_name_unique;')
    session.execute('ALTER TABLE project ADD CONSTRAINT tenant_name_key '
                    'UNIQUE (name);')
    session.commit()
    # And now go ahead and drop the domain_id column.
    # Loading 'domain' lets the ForeignKey('domain.id') below resolve.
    domain_table = sql.Table('domain', meta, autoload=True)
    project_table = sql.Table('project', meta, autoload=True)
    column = sql.Column('domain_id', sql.String(64),
                        sql.ForeignKey('domain.id'), nullable=False)
    column.drop(project_table)
def upgrade(migrate_engine):
    """Give ``user`` and ``project`` a mandatory ``domain_id`` attribute.

    Engines that cannot comfortably alter populated tables (sqlite and
    mysql) take the table-copy route; every other engine adds the column
    in place.
    """
    metadata = sql.MetaData()
    metadata.bind = migrate_engine
    db_session = sessionmaker(bind=migrate_engine)()
    needs_table_copy = migrate_engine.name in ('sqlite', 'mysql')
    if needs_table_copy:
        upgrade_user_table_with_copy(metadata, migrate_engine, db_session)
        upgrade_project_table_with_copy(metadata, migrate_engine, db_session)
    else:
        upgrade_user_table_with_col_create(metadata, migrate_engine,
                                           db_session)
        upgrade_project_table_with_col_create(metadata, migrate_engine,
                                              db_session)
    db_session.commit()
    db_session.close()
def downgrade(migrate_engine):
    """Remove the ``domain_id`` attribute from ``user`` and ``project``.

    sqlite and mysql go through the table-copy path; the rest drop the
    column directly.  (MySQL could in theory use the column-drop path but
    has problems dropping columns that are foreign keys.)
    """
    metadata = sql.MetaData()
    metadata.bind = migrate_engine
    db_session = sessionmaker(bind=migrate_engine)()
    needs_table_copy = migrate_engine.name in ('sqlite', 'mysql')
    if needs_table_copy:
        downgrade_user_table_with_copy(metadata, migrate_engine, db_session)
        downgrade_project_table_with_copy(metadata, migrate_engine,
                                          db_session)
    else:
        downgrade_user_table_with_col_drop(metadata, migrate_engine,
                                           db_session)
        downgrade_project_table_with_col_drop(metadata, migrate_engine,
                                              db_session)
    db_session.commit()
    db_session.close()
| |
#/************************************************************************************************************************
# Copyright (c) 2016, Imagination Technologies Limited and/or its affiliated group companies.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#************************************************************************************************************************/
# Tests related to getting defined objects or resources using basic client explore operations
import unittest
import re
import common
import tools_common
def client_explore(config, *args):
    """Invoke the Awa client explore tool with *args* and return its result."""
    explore_args = (tools_common.CLIENT_EXPLORE,) + args
    return tools_common.run_client(config, *explore_args)
class TestExplore(tools_common.AwaTest):
    @unittest.skip("maintenance headache")
    def test_explore(self):
        """Explore with no target path: expect the full object/resource tree.

        The expected text is a verbatim snapshot of the client tool's
        output over every registered object -- which is why the test is
        skipped as a "maintenance headache": any definition change breaks
        the byte-for-byte comparison.
        """
        # Verbatim expected dump; do not reformat.
        expectedStdout = """Object ID:0 name:LWM2MSecurity minInstances:1 maxInstances:65535
    Resource: ID:0 name:LWM2MServerURI type:2 minInstances:1 maxInstances:1 operations:1
    Resource: ID:1 name:BootstrapServer type:5 minInstances:1 maxInstances:1 operations:1
    Resource: ID:2 name:SecurityMode type:3 minInstances:1 maxInstances:1 operations:1
    Resource: ID:3 name:PublicKeyorIDentity type:6 minInstances:1 maxInstances:1 operations:1
    Resource: ID:4 name:ServerPublicKeyorIDentity type:6 minInstances:1 maxInstances:1 operations:1
    Resource: ID:5 name:SecretKey type:6 minInstances:1 maxInstances:1 operations:1
    Resource: ID:6 name:SMSSecurityMode type:3 minInstances:1 maxInstances:1 operations:1
    Resource: ID:7 name:SMSBindingKeyParameters type:6 minInstances:1 maxInstances:1 operations:1
    Resource: ID:8 name:SMSBindingSecretKeys type:6 minInstances:1 maxInstances:1 operations:1
    Resource: ID:9 name:LWM2MServerSMSNumber type:3 minInstances:1 maxInstances:1 operations:1
    Resource: ID:10 name:ShortServerID type:3 minInstances:0 maxInstances:1 operations:1
    Resource: ID:11 name:ClientHoldOffTime type:3 minInstances:1 maxInstances:1 operations:1
Object ID:1 name:LWM2MServer minInstances:1 maxInstances:65535
    Resource: ID:0 name:ShortServerID type:3 minInstances:1 maxInstances:1 operations:2
    Resource: ID:1 name:Lifetime type:3 minInstances:1 maxInstances:1 operations:4
    Resource: ID:2 name:DefaultMinimumPeriod type:3 minInstances:0 maxInstances:1 operations:4
    Resource: ID:3 name:DefaultMaximumPeriod type:3 minInstances:0 maxInstances:1 operations:4
    Resource: ID:4 name:Disable type:1 minInstances:0 maxInstances:1 operations:5
    Resource: ID:5 name:DisableTimeout type:3 minInstances:0 maxInstances:1 operations:4
    Resource: ID:6 name:NotificationStoringWhenDisabledorOffline type:5 minInstances:1 maxInstances:1 operations:4
    Resource: ID:7 name:Binding type:2 minInstances:1 maxInstances:1 operations:4
    Resource: ID:8 name:RegistrationUpdateTrigger type:1 minInstances:1 maxInstances:1 operations:5
Object ID:2 name:LWM2MAccessControl minInstances:0 maxInstances:65535
    Resource: ID:0 name:ObjectID type:3 minInstances:1 maxInstances:1 operations:2
    Resource: ID:1 name:ObjectInstanceID type:3 minInstances:1 maxInstances:1 operations:2
    Resource: ID:2 name:ACL type:10 minInstances:0 maxInstances:65535 operations:4
    Resource: ID:3 name:AccessControlOwner type:3 minInstances:1 maxInstances:1 operations:4
Object ID:3 name:Device minInstances:1 maxInstances:1
    Resource: ID:0 name:Manufacturer type:2 minInstances:0 maxInstances:1 operations:2
    Resource: ID:1 name:ModelNumber type:2 minInstances:0 maxInstances:1 operations:2
    Resource: ID:2 name:SerialNumber type:2 minInstances:0 maxInstances:1 operations:2
    Resource: ID:3 name:FirmwareVersion type:2 minInstances:0 maxInstances:1 operations:2
    Resource: ID:4 name:Reboot type:1 minInstances:1 maxInstances:1 operations:5
    Resource: ID:5 name:FactoryReset type:1 minInstances:0 maxInstances:1 operations:5
    Resource: ID:6 name:AvailablePowerSources type:10 minInstances:0 maxInstances:65535 operations:2
    Resource: ID:7 name:PowerSourceVoltage type:10 minInstances:0 maxInstances:65535 operations:2
    Resource: ID:8 name:PowerSourceCurrent type:10 minInstances:0 maxInstances:65535 operations:2
    Resource: ID:9 name:BatteryLevel type:3 minInstances:0 maxInstances:1 operations:2
    Resource: ID:10 name:MemoryFree type:3 minInstances:0 maxInstances:1 operations:2
    Resource: ID:11 name:ErrorCode type:10 minInstances:1 maxInstances:65535 operations:2
    Resource: ID:12 name:ResetErrorCode type:1 minInstances:0 maxInstances:1 operations:5
    Resource: ID:13 name:CurrentTime type:7 minInstances:0 maxInstances:1 operations:4
    Resource: ID:14 name:UTCOffset type:2 minInstances:0 maxInstances:1 operations:4
    Resource: ID:15 name:Timezone type:2 minInstances:0 maxInstances:1 operations:4
    Resource: ID:16 name:SupportedBindingandModes type:2 minInstances:1 maxInstances:1 operations:2
    Resource: ID:17 name:DeviceType type:2 minInstances:0 maxInstances:1 operations:2
    Resource: ID:18 name:HardwareVersion type:2 minInstances:0 maxInstances:1 operations:2
    Resource: ID:19 name:SoftwareVersion type:2 minInstances:0 maxInstances:1 operations:2
    Resource: ID:20 name:BatteryStatus type:3 minInstances:0 maxInstances:1 operations:2
    Resource: ID:21 name:MemoryTotal type:3 minInstances:0 maxInstances:1 operations:2
Object ID:4 name:ConnectivityMonitoring minInstances:0 maxInstances:1
    Resource: ID:0 name:NetworkBearer type:3 minInstances:1 maxInstances:1 operations:2
    Resource: ID:1 name:AvailableNetworkBearer type:10 minInstances:1 maxInstances:65535 operations:2
    Resource: ID:2 name:RadioSignalStrength type:3 minInstances:1 maxInstances:1 operations:2
    Resource: ID:3 name:LinkQuality type:3 minInstances:0 maxInstances:1 operations:2
    Resource: ID:4 name:ADDRESSes type:9 minInstances:1 maxInstances:65535 operations:2
    Resource: ID:5 name:RouterADDRESSe type:9 minInstances:0 maxInstances:65535 operations:2
    Resource: ID:6 name:LinkUtilization type:3 minInstances:0 maxInstances:1 operations:2
    Resource: ID:7 name:APN type:9 minInstances:0 maxInstances:65535 operations:2
    Resource: ID:8 name:CellID type:3 minInstances:0 maxInstances:1 operations:2
    Resource: ID:9 name:SMNC type:3 minInstances:0 maxInstances:1 operations:2
    Resource: ID:10 name:SMCC type:3 minInstances:0 maxInstances:1 operations:2
Object ID:7 name:ConnectivityStatistics minInstances:0 maxInstances:1
    Resource: ID:0 name:SMSTxCounter type:3 minInstances:0 maxInstances:1 operations:2
    Resource: ID:1 name:SMSRxCounter type:3 minInstances:0 maxInstances:1 operations:2
    Resource: ID:2 name:TxData type:3 minInstances:0 maxInstances:1 operations:2
    Resource: ID:3 name:RxData type:3 minInstances:0 maxInstances:1 operations:2
    Resource: ID:4 name:MaxMessageSize type:3 minInstances:0 maxInstances:1 operations:2
    Resource: ID:5 name:AverageMessageSize type:3 minInstances:0 maxInstances:1 operations:2
    Resource: ID:6 name:StartOrReset type:1 minInstances:1 maxInstances:1 operations:5
Object ID:5 name:FirmwareUpdate minInstances:0 maxInstances:1
    Resource: ID:0 name:Package type:6 minInstances:1 maxInstances:1 operations:3
    Resource: ID:1 name:PackageURI type:2 minInstances:1 maxInstances:1 operations:3
    Resource: ID:2 name:Update type:1 minInstances:1 maxInstances:1 operations:5
    Resource: ID:3 name:State type:3 minInstances:1 maxInstances:1 operations:2
    Resource: ID:4 name:UpdateSupportedObjects type:5 minInstances:0 maxInstances:1 operations:4
    Resource: ID:5 name:UpdateResult type:3 minInstances:1 maxInstances:1 operations:2
Object ID:6 name:Location minInstances:0 maxInstances:1
    Resource: ID:0 name:Latitude type:2 minInstances:1 maxInstances:1 operations:2
    Resource: ID:1 name:Longitude type:2 minInstances:1 maxInstances:1 operations:2
    Resource: ID:2 name:Altitude type:2 minInstances:0 maxInstances:1 operations:2
    Resource: ID:3 name:Uncertainty type:2 minInstances:0 maxInstances:1 operations:2
    Resource: ID:4 name:Velocity type:6 minInstances:0 maxInstances:1 operations:2
    Resource: ID:5 name:Timestamp type:7 minInstances:1 maxInstances:1 operations:2
Object ID:1000 name:Object1000 minInstances:1 maxInstances:1
    Resource: ID:100 name:Resource100 type:2 minInstances:0 maxInstances:1 operations:4
    Resource: ID:101 name:Resource101 type:3 minInstances:0 maxInstances:1 operations:4
    Resource: ID:102 name:Resource102 type:4 minInstances:0 maxInstances:1 operations:4
    Resource: ID:103 name:Resource103 type:5 minInstances:0 maxInstances:1 operations:4
    Resource: ID:104 name:Resource104 type:7 minInstances:0 maxInstances:1 operations:4
    Resource: ID:105 name:Resource105 type:6 minInstances:0 maxInstances:1 operations:4
    Resource: ID:106 name:Resource106 type:8 minInstances:0 maxInstances:1 operations:4
    Resource: ID:107 name:Resource107 type:1 minInstances:0 maxInstances:1 operations:5
    Resource: ID:200 name:Resource200 type:9 minInstances:0 maxInstances:65535 operations:4
    Resource: ID:201 name:Resource201 type:10 minInstances:0 maxInstances:65535 operations:4
    Resource: ID:202 name:Resource202 type:11 minInstances:0 maxInstances:65535 operations:4
    Resource: ID:203 name:Resource203 type:12 minInstances:0 maxInstances:65535 operations:4
    Resource: ID:204 name:Resource204 type:14 minInstances:0 maxInstances:65535 operations:4
    Resource: ID:205 name:Resource205 type:13 minInstances:0 maxInstances:65535 operations:4
    Resource: ID:206 name:Resource206 type:15 minInstances:0 maxInstances:65535 operations:4
Object ID:2000 name:Object2000 minInstances:1 maxInstances:65535
    Resource: ID:100 name:Resource100 type:2 minInstances:0 maxInstances:1 operations:4
    Resource: ID:101 name:Resource101 type:3 minInstances:0 maxInstances:1 operations:4
    Resource: ID:102 name:Resource102 type:4 minInstances:0 maxInstances:1 operations:4
    Resource: ID:103 name:Resource103 type:5 minInstances:0 maxInstances:1 operations:4
    Resource: ID:104 name:Resource104 type:7 minInstances:0 maxInstances:1 operations:4
    Resource: ID:105 name:Resource105 type:6 minInstances:0 maxInstances:1 operations:4
    Resource: ID:106 name:Resource106 type:8 minInstances:0 maxInstances:1 operations:4
    Resource: ID:107 name:Resource107 type:1 minInstances:0 maxInstances:1 operations:5
    Resource: ID:200 name:Resource200 type:9 minInstances:0 maxInstances:65535 operations:4
    Resource: ID:201 name:Resource201 type:10 minInstances:0 maxInstances:65535 operations:4
    Resource: ID:202 name:Resource202 type:11 minInstances:0 maxInstances:65535 operations:4
    Resource: ID:203 name:Resource203 type:12 minInstances:0 maxInstances:65535 operations:4
    Resource: ID:204 name:Resource204 type:14 minInstances:0 maxInstances:65535 operations:4
    Resource: ID:205 name:Resource205 type:13 minInstances:0 maxInstances:65535 operations:4
    Resource: ID:206 name:Resource206 type:15 minInstances:0 maxInstances:65535 operations:4
"""
        expectedStderr = ""
        expectedCode = 0
        # Run the explore tool with an empty target and compare verbatim.
        result = client_explore(self.config, "")
        self.assertEqual(expectedStdout, result.stdout)
        self.assertEqual(expectedStderr, result.stderr)
        self.assertEqual(expectedCode, result.code)
@unittest.skip("maintenance headache")
def test_explore_object(self):
expectedStdout = """Object ID:1 name:LWM2MServer minInstances:1 maxInstances:65535
Resource: ID:0 name:ShortServerID type:3 minInstances:1 maxInstances:1 operations:2
Resource: ID:1 name:Lifetime type:3 minInstances:1 maxInstances:1 operations:4
Resource: ID:2 name:DefaultMinimumPeriod type:3 minInstances:0 maxInstances:1 operations:4
Resource: ID:3 name:DefaultMaximumPeriod type:3 minInstances:0 maxInstances:1 operations:4
Resource: ID:4 name:Disable type:1 minInstances:0 maxInstances:1 operations:5
Resource: ID:5 name:DisableTimeout type:3 minInstances:0 maxInstances:1 operations:4
Resource: ID:6 name:NotificationStoringWhenDisabledorOffline type:5 minInstances:1 maxInstances:1 operations:4
Resource: ID:7 name:Binding type:2 minInstances:1 maxInstances:1 operations:4
Resource: ID:8 name:RegistrationUpdateTrigger type:1 minInstances:1 maxInstances:1 operations:5
"""
expectedStderr = ""
expectedCode = 0
result = client_explore(self.config, "/1")
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
@unittest.skip("maintenance headache")
def test_explore_multiple_object(self):
expectedStdout = """Object ID:1 name:LWM2MServer minInstances:1 maxInstances:65535
Resource: ID:0 name:ShortServerID type:3 minInstances:1 maxInstances:1 operations:2
Resource: ID:1 name:Lifetime type:3 minInstances:1 maxInstances:1 operations:4
Resource: ID:2 name:DefaultMinimumPeriod type:3 minInstances:0 maxInstances:1 operations:4
Resource: ID:3 name:DefaultMaximumPeriod type:3 minInstances:0 maxInstances:1 operations:4
Resource: ID:4 name:Disable type:1 minInstances:0 maxInstances:1 operations:5
Resource: ID:5 name:DisableTimeout type:3 minInstances:0 maxInstances:1 operations:4
Resource: ID:6 name:NotificationStoringWhenDisabledorOffline type:5 minInstances:1 maxInstances:1 operations:4
Resource: ID:7 name:Binding type:2 minInstances:1 maxInstances:1 operations:4
Resource: ID:8 name:RegistrationUpdateTrigger type:1 minInstances:1 maxInstances:1 operations:5
Object ID:2 name:LWM2MAccessControl minInstances:0 maxInstances:65535
Resource: ID:0 name:ObjectID type:3 minInstances:1 maxInstances:1 operations:2
Resource: ID:1 name:ObjectInstanceID type:3 minInstances:1 maxInstances:1 operations:2
Resource: ID:2 name:ACL type:10 minInstances:0 maxInstances:65535 operations:4
Resource: ID:3 name:AccessControlOwner type:3 minInstances:1 maxInstances:1 operations:4
"""
expectedStderr = ""
expectedCode = 0
result = client_explore(self.config, "/1 /2")
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
@unittest.skip("maintenance headache")
def test_explore_object_instance(self):
expectedStdout = """Object ID:1 name:LWM2MServer minInstances:1 maxInstances:65535
Resource: ID:0 name:ShortServerID type:3 minInstances:1 maxInstances:1 operations:2
Resource: ID:1 name:Lifetime type:3 minInstances:1 maxInstances:1 operations:4
Resource: ID:2 name:DefaultMinimumPeriod type:3 minInstances:0 maxInstances:1 operations:4
Resource: ID:3 name:DefaultMaximumPeriod type:3 minInstances:0 maxInstances:1 operations:4
Resource: ID:4 name:Disable type:1 minInstances:0 maxInstances:1 operations:5
Resource: ID:5 name:DisableTimeout type:3 minInstances:0 maxInstances:1 operations:4
Resource: ID:6 name:NotificationStoringWhenDisabledorOffline type:5 minInstances:1 maxInstances:1 operations:4
Resource: ID:7 name:Binding type:2 minInstances:1 maxInstances:1 operations:4
Resource: ID:8 name:RegistrationUpdateTrigger type:1 minInstances:1 maxInstances:1 operations:5
"""
expectedStderr = ""
expectedCode = 0
result = client_explore(self.config, "/1/0")
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
@unittest.skip("maintenance headache")
def test_explore_resource(self):
expectedStdout = """Object ID:3 name:Device minInstances:1 maxInstances:1
Resource: ID:0 name:Manufacturer type:2 minInstances:0 maxInstances:1 operations:2
"""
expectedStderr = ""
expectedCode = 0
result = client_explore(self.config, "/3/0/0")
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
@unittest.skip("maintenance headache")
def test_explore_multiple_resources(self):
expectedStdout = """Object ID:3 name:Device minInstances:1 maxInstances:1
Resource: ID:0 name:Manufacturer type:2 minInstances:0 maxInstances:1 operations:2
Object ID:4 name:ConnectivityMonitoring minInstances:0 maxInstances:1
Resource: ID:1 name:AvailableNetworkBearer type:10 minInstances:1 maxInstances:65535 operations:2
"""
expectedStderr = ""
expectedCode = 0
result = client_explore(self.config, "/3/0/0 /4/0/1")
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
@unittest.skip("maintenance headache")
def test_explore_non_exsistence_object(self):
expectedStdout = "Object 23456 not defined\n"
expectedStderr = ""
expectedCode = 0
result = client_explore(self.config, "/23456")
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
@unittest.skip("maintenance headache")
def test_explore_non_exsistence_resource(self):
expectedStdout = """Object ID:3 name:Device minInstances:1 maxInstances:1
Resource 1111 not defined
"""
expectedStderr = ""
expectedCode = 0
result = client_explore(self.config, "/3/0/1111")
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
@unittest.skip("maintenance headache")
def test_explore_invalid_path(self):
expectedStdout = ""
expectedStderr = "Target /-1/0/0 is not valid\n"
expectedCode = 0
result = client_explore(self.config, "/-1/0/0")
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
@unittest.skip("maintenance headache")
def test_explore_invalid_object(self):
expectedStdout = ""
expectedStderr = "Target /@@adf%%% is not valid\n"
expectedCode = 0
result = client_explore(self.config, "/@@adf%%%")
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
@unittest.skip("maintenance headache")
def test_explore_invalid_resource(self):
expectedStdout = ""
expectedStderr = "Target /3/0/%@%$ is not valid\n"
expectedCode = 0
result = client_explore(self.config, "/3/0/%@%$")
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
class TestExploreBasic(tools_common.BasicTestCase):
    # Basic CLI behaviour of awa-client-explore (--help / --version output).
    # Skipped because the expected text must track the tool's generated
    # option help verbatim, which is a maintenance burden.
    #
    # NOTE(review): these tests unpack client_explore() as a
    # (code, stdout, stderr) tuple, while the explore tests above access
    # result.stdout / result.stderr / result.code attributes. One of the two
    # calling conventions is presumably stale -- confirm against the current
    # client_explore() helper before un-skipping.
    @unittest.skip("maintenance headache")
    def test_get_help(self):
        # Both the long and short help options must produce identical output.
        expectedStdout = """awa-client-explore 1.0
LWM2M Client Objects/Resource Explore Operation
Usage: awa-client-explore [OPTIONS]... [PATHS]...
-h, --help Print help and exit
-V, --version Print version and exit
-v, --verbose Increase program verbosity (default=off)
-d, --debug Increase program verbosity (default=off)
-a, --ipcAddress=ADDRESS Connect to Client IPC Address
(default=`127.0.0.1')
-p, --ipcPort=PORT Connect to Client IPC port (default=`12345')
-q, --quiet Print values only (quiet) (default=off)
Specify one or more object, object instance and resource paths
in the format "/O/I/R", separated by spaces. For example:
/3 /4 /4/0/7 /5/0/0 /5
"""
        expectedStderr = ""
        expectedCode = 0
        helpOptions = ("--help","-h")
        for option in helpOptions:
            code, stdout, stderr = client_explore(self.config, option)
            self.assertEqual(expectedStdout, stdout)
            self.assertEqual(expectedStderr, stderr)
            self.assertEqual(expectedCode, code)
    @unittest.skip("maintenance headache")
    def test_get_version(self):
        # Both the long and short version options print the tool banner only.
        expectedStdout = "awa-client-explore 1.0\n"
        expectedStderr = ""
        expectedCode = 0
        versionOptions = ("--version","-V")
        for option in versionOptions:
            code, stdout, stderr = client_explore(self.config, option)
            self.assertEqual(expectedStdout, stdout)
            self.assertEqual(expectedStderr, stderr)
            self.assertEqual(expectedCode, code)
| |
import atexit
import datetime
from distutils.version import StrictVersion
from os import environ as env
import os
import subprocess
import sys
import seesaw
from seesaw.item import ItemInterpolation, ItemValue
from seesaw.pipeline import Pipeline
from seesaw.project import Project
from seesaw.task import LimitConcurrent
from seesaw.util import find_executable
# FIXME: This is a bit of a hack.
#
# Pipeline scripts are run with pwd set to their directory, which is why
# getcwd will (often) return the Right Thing. A more robust solution would be
# nice, though.
sys.path.append(os.getcwd())
from archivebot import control
from archivebot import shared_config
from archivebot.seesaw import extensions
from archivebot.seesaw import monitoring
from archivebot.seesaw.preflight import check_wpull_args
from archivebot.seesaw.dnspythoncrash import test as dnspython_crash_fixed
from archivebot.seesaw.wpull import WpullArgs
from archivebot.seesaw.tasks import GetItemFromQueue, StartHeartbeat, \
SetFetchDepth, PreparePaths, Wpull, CompressLogIfFailed, WriteInfo, DownloadUrlFile, \
RelabelIfAborted, MoveFiles, StopHeartbeat, MarkItemAsDone, CheckIP, CheckLocalWebserver
# --- Tool discovery and startup sanity checks ---------------------------------
# Wpull version this pipeline expects; checked by find_executable below.
WPULL_VERSION = ('2.0.3')
EXPIRE_TIME = 60 * 60 * 48 # 48 hours between archive requests
# Locate required external tools by probing their `--version` output.
WPULL_EXE = find_executable('Wpull', WPULL_VERSION, ['wpull', './wpull'], '--version')
YOUTUBE_DL = find_executable('youtube-dl', None, ['./youtube-dl'], '--version')
# Encode the interpreter version as major*10+minor (e.g. 3.4 -> 34).
# NOTE(review): ambiguous once minor >= 10 (3.10 -> 40); fine for the
# ">= 3.3" floor check but fragile as a general version comparison.
version_integer = (sys.version_info.major * 10) + sys.version_info.minor
assert version_integer >= 33, \
    "This pipeline requires Python >= 3.3. You are running %s." % \
    sys.version
if not os.environ.get('NO_SEGFAULT_340'):
    assert sys.version_info[:3] != (3, 4, 0), \
        "Python 3.4.0 should not be used. It may segfault. " \
        "Set NO_SEGFAULT_340=1 if your Python is patched. " \
        "See https://bugs.python.org/issue21435"
assert WPULL_EXE, 'No usable Wpull found.'
assert YOUTUBE_DL, 'No usable youtube-dl found.'
assert 'REDIS_URL' in env, 'REDIS_URL not set.'
assert 'FINISHED_WARCS_DIR' in env, 'FINISHED_WARCS_DIR not set.'
# Maximum size of a single WARC file, in bytes (default 5 GiB), kept as a
# string because it is passed straight through to the wpull command line.
if 'WARC_MAX_SIZE' in env:
    WARC_MAX_SIZE = env['WARC_MAX_SIZE']
else:
    WARC_MAX_SIZE = '5368709120'
# Disk/memory thresholds handed to Wpull's resource monitor options.
WPULL_MONITOR_DISK = env.get('WPULL_MONITOR_DISK', '5120m')
WPULL_MONITOR_MEMORY = env.get('WPULL_MONITOR_MEMORY', '50m')
assert 'TMUX' in env or 'STY' in env or env.get('NO_SCREEN') == "1", \
    "Refusing to start outside of screen or tmux, set NO_SCREEN=1 to override"
if StrictVersion(seesaw.__version__) < StrictVersion("0.1.8b1"):
    raise Exception(
        "Needs seesaw@python3/development version 0.1.8b1 or higher. "
        "You have version {0}".format(seesaw.__version__)
    )
# NOTE(review): `downloader` is not defined anywhere in this file; it is
# presumably injected into the globals by the seesaw runner when it exec()s
# this pipeline script -- confirm before refactoring.
assert downloader not in ('ignorednick', 'YOURNICKHERE'), 'please use a real nickname'
assert datetime.datetime.now(datetime.timezone.utc).astimezone().tzinfo.utcoffset(None).seconds == 0, 'Please set the time zone to UTC'
assert dnspython_crash_fixed(), 'Broken crash-prone dnspython found'
REDIS_URL = env['REDIS_URL']
LOG_CHANNEL = shared_config.log_channel()
PIPELINE_CHANNEL = shared_config.pipeline_channel()
OPENSSL_CONF = env.get('OPENSSL_CONF')
TMPDIR = env.get('TMPDIR')
# ------------------------------------------------------------------------------
# CONTROL CONNECTION
# ------------------------------------------------------------------------------
# Rebinds the imported `archivebot.control` module name to a Control instance;
# the module object itself is not referenced again after this line.
control = control.Control(REDIS_URL, LOG_CHANNEL, PIPELINE_CHANNEL)
# ------------------------------------------------------------------------------
# SEESAW EXTENSIONS
# ------------------------------------------------------------------------------
extensions.install_stdout_extension(control)
# ------------------------------------------------------------------------------
# PIPELINE
# ------------------------------------------------------------------------------
project = Project(
    title = "ArchiveBot request handler"
)
#FIXME: Same hack as above; seesaw executes pipeline.py with the pipeline dir as the cwd.
# __file__ can't be used because seesaw exec()s the file contents rather than importing the file.
REPO_DIRECTORY = os.path.dirname(os.path.realpath('.'))
def pipeline_version():
    """Return the pipeline's version string, e.g. '20190820.5cd1e38'.

    Derived from the date and abbreviated hash of the repository HEAD commit.
    """
    git_cmd = ['git', 'show', '-s', '--format=format:%cd.%h', '--date=format:%Y%m%d']
    raw = subprocess.check_output(git_cmd, cwd = REPO_DIRECTORY)
    return raw.decode('utf-8').strip()
def wpull_version():
    """Return the version string reported by the Wpull executable."""
    raw = subprocess.check_output(
        [WPULL_EXE, '--version'], stderr=subprocess.STDOUT)
    return raw.decode('utf-8').strip()
class AcceptAny:
    """Container that claims to hold every item.

    Used as a wildcard for ``accept_on_exit_code`` so any wpull exit code
    is accepted by the pipeline.
    """
    def __contains__(self, _item):
        return True
# Version identification used throughout the run.
VERSION = pipeline_version()
# Deliberately distinctive user agent so site operators can identify ArchiveBot.
DEFAULT_USER_AGENT = \
    'ArchiveTeam ArchiveBot/%s (wpull %s) and not Mozilla/5.0 ' \
    '(Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) ' \
    'Chrome/42.0.2311.90 Safari/537.36' % (VERSION, wpull_version())
# Only the last element of the monitoring tuple is needed here.
_, _, _, pipeline_id = monitoring.pipeline_id()
# Template for the wpull invocation; interpolated per item by the Wpull task.
wpull_args = WpullArgs(
    default_user_agent=DEFAULT_USER_AGENT,
    wpull_exe=WPULL_EXE,
    youtube_dl_exe=YOUTUBE_DL,
    finished_warcs_dir=os.environ["FINISHED_WARCS_DIR"],
    warc_max_size=WARC_MAX_SIZE,
    monitor_disk=WPULL_MONITOR_DISK,
    monitor_memory=WPULL_MONITOR_MEMORY,
)
# Preflight: fail fast on a malformed wpull invocation before taking work.
check_wpull_args(wpull_args)
# Environment for the wpull subprocess; ITEM_IDENT and LOG_KEY are filled in
# per item via seesaw's ItemInterpolation.
wpull_env = dict(os.environ)
wpull_env['ITEM_IDENT'] = ItemInterpolation('%(ident)s')
wpull_env['LOG_KEY'] = ItemInterpolation('%(log_key)s')
wpull_env['REDIS_URL'] = REDIS_URL
if OPENSSL_CONF:
    wpull_env['OPENSSL_CONF'] = OPENSSL_CONF
if TMPDIR:
    wpull_env['TMPDIR'] = TMPDIR
# Tasks run in this order for every item pulled from the queue.
pipeline = Pipeline(
    CheckIP(),
    CheckLocalWebserver(),
    GetItemFromQueue(control, pipeline_id, downloader,
        ao_only=env.get('AO_ONLY'), large=env.get('LARGE'),
        version_check = (VERSION, pipeline_version)),
    StartHeartbeat(control),
    SetFetchDepth(),
    PreparePaths(),
    WriteInfo(),
    DownloadUrlFile(control),
    Wpull(
        wpull_args,
        # Exit-code handling happens downstream (RelabelIfAborted etc.),
        # so accept any exit code here.
        accept_on_exit_code=AcceptAny(),
        env=wpull_env,
    ),
    RelabelIfAborted(control),
    CompressLogIfFailed(),
    # Run WriteInfo again so the info file reflects post-crawl state.
    WriteInfo(),
    MoveFiles(target_directory = os.environ["FINISHED_WARCS_DIR"]),
    StopHeartbeat(),
    MarkItemAsDone(control, EXPIRE_TIME)
)
def stop_control():
    # Unregister this pipeline from the control node when the pipeline
    # finishes cleaning up.
    #control.flag_logging_thread_for_termination()
    control.unregister_pipeline(pipeline_id)
pipeline.on_cleanup += stop_control
pipeline.running_status = "Running"
def status_running():
    # Restore the status when a requested stop is cancelled.
    pipeline.running_status = "Running"
pipeline.on_stop_canceled += status_running
def status_stopping():
    # Reflect an operator-requested stop in the reported status.
    pipeline.running_status = "Stopping"
pipeline.on_stop_requested += status_stopping
# Activate system monitoring.
monitoring.start(pipeline, control, VERSION, downloader)
# Startup banner: show the pipeline ID and which queue-selection mode is
# active so operators can verify the configuration at a glance.
print('*' * 60)
print('Pipeline ID: %s' % pipeline_id)
# Check the combined flags first. In the previous ordering the
# AO_ONLY-and-LARGE branch was unreachable, because the bare AO_ONLY test
# matched before the combination was ever evaluated.
if env.get('AO_ONLY') and env.get('LARGE'):
    print('!ao-only and large modes enabled. THIS IS PROBABLY A MISTAKE. '
        ' Pipeline will accept only jobs queued with --large or !ao.')
elif env.get('AO_ONLY'):
    print('!ao-only mode enabled; pipeline will accept jobs queued with !ao '
        '(and not jobs queued with !a or --pipeline)')
elif env.get('LARGE'):
    print('large mode enabled; pipeline will accept jobs queued with !a'
        ' --large')
print('*' * 60)
print()
# vim:ts=4:sw=4:et:tw=78
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AppliedReservationList
from ._models_py3 import AppliedReservations
from ._models_py3 import AvailableScopeProperties
from ._models_py3 import AvailableScopeRequest
from ._models_py3 import AvailableScopeRequestProperties
from ._models_py3 import BillingInformation
from ._models_py3 import CalculateExchangeOperationResultResponse
from ._models_py3 import CalculateExchangeRequest
from ._models_py3 import CalculateExchangeRequestProperties
from ._models_py3 import CalculateExchangeResponseProperties
from ._models_py3 import CalculatePriceResponse
from ._models_py3 import CalculatePriceResponseProperties
from ._models_py3 import CalculatePriceResponsePropertiesBillingCurrencyTotal
from ._models_py3 import CalculatePriceResponsePropertiesPricingCurrencyTotal
from ._models_py3 import Catalog
from ._models_py3 import CreateGenericQuotaRequestParameters
from ._models_py3 import CurrentQuotaLimit
from ._models_py3 import CurrentQuotaLimitBase
from ._models_py3 import Error
from ._models_py3 import ExceptionResponse
from ._models_py3 import ExchangeOperationResultResponse
from ._models_py3 import ExchangePolicyError
from ._models_py3 import ExchangePolicyErrors
from ._models_py3 import ExchangeRequest
from ._models_py3 import ExchangeRequestProperties
from ._models_py3 import ExchangeResponseProperties
from ._models_py3 import ExtendedErrorInfo
from ._models_py3 import ExtendedStatusInfo
from ._models_py3 import MergeRequest
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationList
from ._models_py3 import OperationResponse
from ._models_py3 import OperationResultError
from ._models_py3 import Patch
from ._models_py3 import PatchPropertiesRenewProperties
from ._models_py3 import PaymentDetail
from ._models_py3 import Price
from ._models_py3 import PurchaseRequest
from ._models_py3 import PurchaseRequestPropertiesReservedResourceProperties
from ._models_py3 import QuotaLimits
from ._models_py3 import QuotaLimitsResponse
from ._models_py3 import QuotaProperties
from ._models_py3 import QuotaRequestDetails
from ._models_py3 import QuotaRequestDetailsList
from ._models_py3 import QuotaRequestOneResourceSubmitResponse
from ._models_py3 import QuotaRequestProperties
from ._models_py3 import QuotaRequestSubmitResponse
from ._models_py3 import QuotaRequestSubmitResponse201
from ._models_py3 import RenewPropertiesResponse
from ._models_py3 import RenewPropertiesResponseBillingCurrencyTotal
from ._models_py3 import RenewPropertiesResponsePricingCurrencyTotal
from ._models_py3 import ReservationList
from ._models_py3 import ReservationMergeProperties
from ._models_py3 import ReservationOrderBillingPlanInformation
from ._models_py3 import ReservationOrderList
from ._models_py3 import ReservationOrderResponse
from ._models_py3 import ReservationProperties
from ._models_py3 import ReservationResponse
from ._models_py3 import ReservationSplitProperties
from ._models_py3 import ReservationToExchange
from ._models_py3 import ReservationToPurchaseCalculateExchange
from ._models_py3 import ReservationToPurchaseExchange
from ._models_py3 import ReservationToReturn
from ._models_py3 import ReservationToReturnForExchange
from ._models_py3 import ResourceName
from ._models_py3 import ScopeProperties
from ._models_py3 import ServiceError
from ._models_py3 import ServiceErrorDetail
from ._models_py3 import SkuName
from ._models_py3 import SkuProperty
from ._models_py3 import SkuRestriction
from ._models_py3 import SplitRequest
from ._models_py3 import SubRequest
from ._models_py3 import SubscriptionScopeProperties
except (SyntaxError, ImportError):
from ._models import AppliedReservationList # type: ignore
from ._models import AppliedReservations # type: ignore
from ._models import AvailableScopeProperties # type: ignore
from ._models import AvailableScopeRequest # type: ignore
from ._models import AvailableScopeRequestProperties # type: ignore
from ._models import BillingInformation # type: ignore
from ._models import CalculateExchangeOperationResultResponse # type: ignore
from ._models import CalculateExchangeRequest # type: ignore
from ._models import CalculateExchangeRequestProperties # type: ignore
from ._models import CalculateExchangeResponseProperties # type: ignore
from ._models import CalculatePriceResponse # type: ignore
from ._models import CalculatePriceResponseProperties # type: ignore
from ._models import CalculatePriceResponsePropertiesBillingCurrencyTotal # type: ignore
from ._models import CalculatePriceResponsePropertiesPricingCurrencyTotal # type: ignore
from ._models import Catalog # type: ignore
from ._models import CreateGenericQuotaRequestParameters # type: ignore
from ._models import CurrentQuotaLimit # type: ignore
from ._models import CurrentQuotaLimitBase # type: ignore
from ._models import Error # type: ignore
from ._models import ExceptionResponse # type: ignore
from ._models import ExchangeOperationResultResponse # type: ignore
from ._models import ExchangePolicyError # type: ignore
from ._models import ExchangePolicyErrors # type: ignore
from ._models import ExchangeRequest # type: ignore
from ._models import ExchangeRequestProperties # type: ignore
from ._models import ExchangeResponseProperties # type: ignore
from ._models import ExtendedErrorInfo # type: ignore
from ._models import ExtendedStatusInfo # type: ignore
from ._models import MergeRequest # type: ignore
from ._models import OperationDisplay # type: ignore
from ._models import OperationList # type: ignore
from ._models import OperationResponse # type: ignore
from ._models import OperationResultError # type: ignore
from ._models import Patch # type: ignore
from ._models import PatchPropertiesRenewProperties # type: ignore
from ._models import PaymentDetail # type: ignore
from ._models import Price # type: ignore
from ._models import PurchaseRequest # type: ignore
from ._models import PurchaseRequestPropertiesReservedResourceProperties # type: ignore
from ._models import QuotaLimits # type: ignore
from ._models import QuotaLimitsResponse # type: ignore
from ._models import QuotaProperties # type: ignore
from ._models import QuotaRequestDetails # type: ignore
from ._models import QuotaRequestDetailsList # type: ignore
from ._models import QuotaRequestOneResourceSubmitResponse # type: ignore
from ._models import QuotaRequestProperties # type: ignore
from ._models import QuotaRequestSubmitResponse # type: ignore
from ._models import QuotaRequestSubmitResponse201 # type: ignore
from ._models import RenewPropertiesResponse # type: ignore
from ._models import RenewPropertiesResponseBillingCurrencyTotal # type: ignore
from ._models import RenewPropertiesResponsePricingCurrencyTotal # type: ignore
from ._models import ReservationList # type: ignore
from ._models import ReservationMergeProperties # type: ignore
from ._models import ReservationOrderBillingPlanInformation # type: ignore
from ._models import ReservationOrderList # type: ignore
from ._models import ReservationOrderResponse # type: ignore
from ._models import ReservationProperties # type: ignore
from ._models import ReservationResponse # type: ignore
from ._models import ReservationSplitProperties # type: ignore
from ._models import ReservationToExchange # type: ignore
from ._models import ReservationToPurchaseCalculateExchange # type: ignore
from ._models import ReservationToPurchaseExchange # type: ignore
from ._models import ReservationToReturn # type: ignore
from ._models import ReservationToReturnForExchange # type: ignore
from ._models import ResourceName # type: ignore
from ._models import ScopeProperties # type: ignore
from ._models import ServiceError # type: ignore
from ._models import ServiceErrorDetail # type: ignore
from ._models import SkuName # type: ignore
from ._models import SkuProperty # type: ignore
from ._models import SkuRestriction # type: ignore
from ._models import SplitRequest # type: ignore
from ._models import SubRequest # type: ignore
from ._models import SubscriptionScopeProperties # type: ignore
from ._azure_reservation_api_enums import (
AppliedScopeType,
CalculateExchangeOperationResultStatus,
ErrorResponseCode,
ExchangeOperationResultStatus,
InstanceFlexibility,
OperationStatus,
PaymentStatus,
QuotaRequestState,
ReservationBillingPlan,
ReservationStatusCode,
ReservationTerm,
ReservedResourceType,
ResourceType,
)
# Public API of the models subpackage. This list is emitted by the AutoRest
# code generator and is kept in sync with the model/enum imports above;
# edits here will be lost on regeneration.
__all__ = [
    'AppliedReservationList',
    'AppliedReservations',
    'AvailableScopeProperties',
    'AvailableScopeRequest',
    'AvailableScopeRequestProperties',
    'BillingInformation',
    'CalculateExchangeOperationResultResponse',
    'CalculateExchangeRequest',
    'CalculateExchangeRequestProperties',
    'CalculateExchangeResponseProperties',
    'CalculatePriceResponse',
    'CalculatePriceResponseProperties',
    'CalculatePriceResponsePropertiesBillingCurrencyTotal',
    'CalculatePriceResponsePropertiesPricingCurrencyTotal',
    'Catalog',
    'CreateGenericQuotaRequestParameters',
    'CurrentQuotaLimit',
    'CurrentQuotaLimitBase',
    'Error',
    'ExceptionResponse',
    'ExchangeOperationResultResponse',
    'ExchangePolicyError',
    'ExchangePolicyErrors',
    'ExchangeRequest',
    'ExchangeRequestProperties',
    'ExchangeResponseProperties',
    'ExtendedErrorInfo',
    'ExtendedStatusInfo',
    'MergeRequest',
    'OperationDisplay',
    'OperationList',
    'OperationResponse',
    'OperationResultError',
    'Patch',
    'PatchPropertiesRenewProperties',
    'PaymentDetail',
    'Price',
    'PurchaseRequest',
    'PurchaseRequestPropertiesReservedResourceProperties',
    'QuotaLimits',
    'QuotaLimitsResponse',
    'QuotaProperties',
    'QuotaRequestDetails',
    'QuotaRequestDetailsList',
    'QuotaRequestOneResourceSubmitResponse',
    'QuotaRequestProperties',
    'QuotaRequestSubmitResponse',
    'QuotaRequestSubmitResponse201',
    'RenewPropertiesResponse',
    'RenewPropertiesResponseBillingCurrencyTotal',
    'RenewPropertiesResponsePricingCurrencyTotal',
    'ReservationList',
    'ReservationMergeProperties',
    'ReservationOrderBillingPlanInformation',
    'ReservationOrderList',
    'ReservationOrderResponse',
    'ReservationProperties',
    'ReservationResponse',
    'ReservationSplitProperties',
    'ReservationToExchange',
    'ReservationToPurchaseCalculateExchange',
    'ReservationToPurchaseExchange',
    'ReservationToReturn',
    'ReservationToReturnForExchange',
    'ResourceName',
    'ScopeProperties',
    'ServiceError',
    'ServiceErrorDetail',
    'SkuName',
    'SkuProperty',
    'SkuRestriction',
    'SplitRequest',
    'SubRequest',
    'SubscriptionScopeProperties',
    'AppliedScopeType',
    'CalculateExchangeOperationResultStatus',
    'ErrorResponseCode',
    'ExchangeOperationResultStatus',
    'InstanceFlexibility',
    'OperationStatus',
    'PaymentStatus',
    'QuotaRequestState',
    'ReservationBillingPlan',
    'ReservationStatusCode',
    'ReservationTerm',
    'ReservedResourceType',
    'ResourceType',
]
| |
"""
rgenetics datatypes
Ross Lazarus
for the rgenetics and galaxy projects
genome graphs datatypes derived from Interval datatypes
genome graphs datasets have a header row with appropriate columnames
The first column is always the marker - eg columname = rs, first row= rs12345 if the rows are snps
subsequent row values are all numeric ! Will fail if any non numeric (eg '+' or 'NA') values
ross lazarus for rgenetics
august 20 2007
"""
import logging, os, sys, time, tempfile, shutil, string, glob
import data
from galaxy import util
from cgi import escape
import urllib, binascii
from galaxy.web import url_for
from galaxy.datatypes import metadata
from galaxy.datatypes.metadata import MetadataElement
from galaxy.datatypes.data import Text
from galaxy.datatypes.tabular import Tabular
from galaxy.datatypes.images import Html
from galaxy.datatypes.interval import Interval
from galaxy.util.hash_util import *
log = logging.getLogger(__name__)
class GenomeGraphs(Interval):
"""gg version viewable at ucsc of Gff format"""
file_ext = "gg"
column_names = [ 'Seqname', 'Source', 'Feature', 'Start', 'End', 'Score', 'Strand', 'Frame', 'Group' ]
"""Add metadata elements"""
MetadataElement( name="columns", default=9, desc="Number of columns", readonly=True, visible=False )
MetadataElement( name="column_types", default=['str','str','str','int','int','int','str','str','str'], param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False )
MetadataElement( name="chromCol", default=1, desc="Chrom column", param=metadata.ColumnParameter )
MetadataElement( name="startCol", default=4, desc="Start column", param=metadata.ColumnParameter )
MetadataElement( name="endCol", default=5, desc="End column", param=metadata.ColumnParameter )
MetadataElement( name="strandCol", desc="Strand column (click box & select)", param=metadata.ColumnParameter, optional=True, no_value=0 )
###do we need to repeat these? they are the same as should be inherited from interval type
def __init__(self, **kwd):
"""Initialize datatype, by adding GBrowse display app"""
Interval.__init__(self, **kwd)
self.add_display_app ( 'ucsc', 'display at UCSC', 'as_ucsc_display_file', 'ucsc_links' )
def as_ucsc_display_file( self, dataset, **kwd ):
return open( dataset.file_name )
def set_meta( self, dataset, overwrite = True, **kwd ):
i = 0
for i, line in enumerate( file ( dataset.file_name ) ):
line = line.rstrip('\r\n')
if line and not line.startswith( '#' ):
elems = line.split( '\t' )
if len(elems) == 9:
try:
int( elems[3] )
int( elems[4] )
break
except:
pass
Interval.set_meta( self, dataset, overwrite = overwrite, skip = i )
def make_html_table( self, dataset, skipchars=[] ):
"""Create HTML table, used for displaying peek"""
out = ['<table cellspacing="0" cellpadding="3">']
comments = []
try:
# Generate column header
out.append( '<tr>' )
for i, name in enumerate( self.column_names ):
out.append( '<th>%s.%s</th>' % ( str( i+1 ), name ) )
out.append( self.make_html_peek_rows( dataset, skipchars=skipchars ) )
out.append( '</table>' )
out = "".join( out )
except Exception, exc:
out = "Can't create peek %s" % exc
return out
def get_estimated_display_viewport( self, dataset ):
"""
Return a chrom, start, stop tuple for viewing a file. There are slight differences between gff 2 and gff 3
formats. This function should correctly handle both...
"""
if True or (dataset.has_data() and dataset.state == dataset.states.OK):
try:
seqid = ''
start = 2147483647 # Maximum value of a signed 32 bit integer ( 2**31 - 1 )
stop = 0
for i, line in enumerate( file( dataset.file_name ) ):
if i == 0: # track stuff there
continue
line = line.rstrip( '\r\n' )
if not line:
continue
if not line.startswith( '#' ):
elems = line.split( '\t' )
if not seqid:
# We can only set the viewport for a single chromosome
seqid = elems[0]
if seqid == elems[0]:
# Make sure we have not spanned chromosomes
start = min( start, int( elems[3] ) )
stop = max( stop, int( elems[4] ) )
else:
# We've spanned a chromosome
break
if i > 10: # span 10 features
break
except:
seqid, start, stop = ( '', '', '' )
return ( seqid, str( start ), str( stop ) )
else:
return ( '', '', '' )
def gbrowse_links( self, dataset, type, app, base_url ):
ret_val = []
if dataset.has_data:
viewport_tuple = self.get_estimated_display_viewport( dataset )
seqid = viewport_tuple[0]
start = viewport_tuple[1]
stop = viewport_tuple[2]
if seqid and start and stop:
for site_name, site_url in util.get_gbrowse_sites_by_build( dataset.dbkey ):
if site_name in app.config.gbrowse_display_sites:
link = "%s?start=%s&stop=%s&ref=%s&dbkey=%s" % ( site_url, start, stop, seqid, dataset.dbkey )
ret_val.append( ( site_name, link ) )
return ret_val
def ucsc_links( self, dataset, type, app, base_url ):
ret_val = []
if dataset.has_data:
viewport_tuple = self.get_estimated_display_viewport(dataset)
if viewport_tuple:
chrom = viewport_tuple[0]
start = viewport_tuple[1]
stop = viewport_tuple[2]
if start == '' or int(start) < 1:
start='1'
if stop == '' or int(stop) <= start:
stop = '%d' % (int(start) + 10000)
for site_name, site_url in util.get_ucsc_by_build(dataset.dbkey):
if site_name in app.config.ucsc_display_sites:
# HACK: UCSC doesn't support https, so force http even
# if our URL scheme is https. Making this work
# requires additional hackery in your upstream proxy.
# If UCSC ever supports https, remove this hack.
internal_url = "%s" % url_for( controller='dataset',
dataset_id=dataset.id, action='display_at', filename='ucsc_' + site_name )
if base_url.startswith( 'https://' ):
base_url = base_url.replace( 'https', 'http', 1 )
display_url = urllib.quote_plus( "%s%s/display_as?id=%i&display_app=%s&authz_method=display_at" % (base_url, url_for( controller='root' ), dataset.id, type) )
redirect_url = urllib.quote_plus( "%sdb=%s&position=%s:%s-%s&hgt.customText=%%s" % (site_url, dataset.dbkey, chrom, start, stop) )
link = '%s?redirect_url=%s&display_url=%s' % ( internal_url, redirect_url, display_url )
ret_val.append( (site_name, link) )
else:
log.debug('@@@ gg ucsc_links - no viewport_tuple')
return ret_val
def sniff( self, filename ):
"""
Determines whether the file is in gff format
GFF lines have nine required fields that must be tab-separated.
"""
f = open(filename,'r')
headers = f.readline().split
if headers[0].lower() == 'track':
headers = f.readline.split()
#headers = get_headers( filename, '\t' )
try:
if len(headers) < 2:
return False
for hdr in headers:
if hdr and hdr[0].startswith( '##gff-version' ) and hdr[0].find( '2' ) < 0:
return False
if hdr and hdr[0] and not hdr[0].startswith( '#' ):
if len(hdr) != 9:
return False
try:
int( hdr[3] )
int( hdr[4] )
except:
return False
if hdr[5] != '.':
try:
score = int(hdr[5])
except:
return False
if (score < 0 or score > 1000):
return False
if hdr[6] not in data.valid_strand:
return False
return True
except:
return False
class rgTabList(Tabular):
"""
for sampleid and for featureid lists of exclusions or inclusions in the clean tool
featureid subsets on statistical criteria -> specialized display such as gg
"""
file_ext = "rgTList"
def __init__(self, **kwd):
"""Initialize featurelistt datatype"""
Tabular.__init__( self, **kwd )
self.column_names = []
def make_html_table( self, dataset, skipchars=[] ):
"""Create HTML table, used for displaying peek"""
out = ['<table cellspacing="0" cellpadding="3">']
comments = []
try:
# Generate column header
out.append( '<tr>' )
for i, name in enumerate( self.column_names ):
out.append( '<th>%s.%s</th>' % ( str( i+1 ), name ) )
if dataset.metadata.columns - len( self.column_names ) > 0:
for i in range( len( self.column_names ), dataset.metadata.columns ):
out.append( '<th>%s</th>' % str( i+1 ) )
out.append( '</tr>' )
out.append( self.make_html_peek_rows( dataset, skipchars=skipchars ) )
out.append( '</table>' )
out = "".join( out )
except Exception, exc:
out = "Can't create peek %s" % exc
return out
class rgSampleList(rgTabList):
    """
    for sampleid exclusions or inclusions in the clean tool
    output from QC eg excess het, gender error, ibd pair member,eigen outlier,excess mendel errors,...
    since they can be uploaded, should be flexible
    but they are persistent at least
    same infrastructure for expression?
    """
    file_ext = "rgSList"

    def __init__(self, **kwd):
        """
        Initialize samplelist datatype
        """
        rgTabList.__init__( self, **kwd )
        # Fixed: rgTabList.__init__ leaves column_names empty, so the old
        # indexed assignments ([0]/[1]) raised IndexError; assign the list.
        # this is what Plink wants as at 2009
        self.column_names = ['FID', 'IID']

    def sniff(self, filename):
        """Return True if the first line carries the Plink 'FID IID' header."""
        # Fixed: read *filename* (the parameter) rather than an undefined
        # ``dataset``, and split the header line into fields before comparing
        # (header[0] was previously just the first character of the line).
        infile = open(filename, "r")
        try:
            header = infile.readline().split()
        finally:
            infile.close()
        if len(header) >= 2 and header[0] == 'FID' and header[1] == 'IID':
            return True
        else:
            return False
class rgFeatureList( rgTabList ):
    """
    for featureid lists of exclusions or inclusions in the clean tool
    output from QC eg low maf, high missingness, bad hwe in controls, excess mendel errors,...
    featureid subsets on statistical criteria -> specialized display such as gg
    same infrastructure for expression?
    """
    file_ext = "rgFList"

    def __init__(self, **kwd):
        """Initialize featurelist datatype"""
        rgTabList.__init__( self, **kwd )
        # Fixed: rgTabList.__init__ sets column_names to [], so assigning by
        # index raised IndexError; build the list in one assignment instead.
        self.column_names = ['#FeatureId', 'Chr', 'Genpos', 'Mappos']
class Rgenetics(Html):
    """
    class to use for rgenetics

    Composite-datatype base for Rgenetics genotype data: an auto-generated
    HTML primary file indexing the real data files in extra_files_path.
    """
    MetadataElement( name="base_name", desc="base name for all transformed versions of this genetic dataset", default="rgenetics", readonly=True, set_in_upload=True)

    composite_type = 'auto_primary_file'
    allow_datatype_change = False
    file_ext = 'rgenetics'

    def missing_meta( self, dataset=None, **kwargs):
        """Checks for empty meta values"""
        for key, value in dataset.metadata.items():
            if not value:
                return True
        return False

    def generate_primary_file( self, dataset = None ):
        """Build the HTML index listing every composite member file."""
        rval = ['<html><head><title>Rgenetics Galaxy Composite Dataset </title></head><p/>']
        rval.append('<div>This composite dataset is composed of the following files:<p/><ul>')
        for composite_name, composite_file in self.get_composite_files( dataset = dataset ).iteritems():
            opt_text = ''
            if composite_file.optional:
                opt_text = ' (optional)'
            rval.append( '<li><a href="%s" type="application/binary">%s</a>%s' % ( composite_name, composite_name, opt_text ) )
        rval.append( '</ul></div></html>' )
        return "\n".join( rval )

    def regenerate_primary_file(self,dataset):
        """
        cannot do this until we are setting metadata

        Renames member files to the base_name stem and rewrites the HTML index.
        """
        def fix(oldpath,newbase):
            # Rename oldpath so its stem becomes newbase, keeping the extension.
            old,e = os.path.splitext(oldpath)
            head,rest = os.path.split(old)
            newpath = os.path.join(head,newbase)
            newpath = '%s%s' % (newpath,e)
            if oldpath != newpath:  # was the deprecated `<>` operator
                shutil.move(oldpath,newpath)
            return newpath
        bn = dataset.metadata.base_name
        efp = dataset.extra_files_path
        flist = os.listdir(efp)
        proper_base = bn
        rval = ['<html><head><title>Files for Composite Dataset %s</title></head><p/>Comprises the following files:<p/><ul>' % (bn)]
        for i,fname in enumerate(flist):
            newpath = fix(os.path.join(efp,fname),proper_base)
            sfname = os.path.split(newpath)[-1]
            rval.append( '<li><a href="%s">%s</a>' % ( sfname, sfname ) )
        rval.append( '</ul></html>' )
        f = open(dataset.file_name,'w')
        f.write("\n".join( rval ))
        f.write('\n')
        f.close()

    def set_meta( self, dataset, **kwd ):
        """
        for lped/pbed eg

        Derives base_name from the member files, then regenerates the index.
        Returns False when the extra files path is missing or empty.
        """
        if kwd.get('overwrite') == False:
            #log.debug('@@@ rgenetics set_meta called with overwrite = False')
            return True
        try:
            efp = dataset.extra_files_path
        except:
            #log.debug('@@@rgenetics set_meta failed %s - dataset %s has no efp ?' % (sys.exc_info()[0], dataset.name))
            return False
        try:
            flist = os.listdir(efp)
        except:
            #log.debug('@@@rgenetics set_meta failed %s - dataset %s has no efp ?' % (sys.exc_info()[0],dataset.name))
            return False
        if len(flist) == 0:
            #log.debug('@@@rgenetics set_meta failed - %s efp %s is empty?' % (dataset.name,efp))
            return False
        bn = None
        for f in flist:
            # Fixed: os.path.splitext(f)[0] is a single string, so the old
            # two-target unpacking raised ValueError; unpack the tuple itself.
            n, e = os.path.splitext(f)
            if (not bn) and e in ('.ped','.map','.bim','.fam'):
                bn = n
                dataset.metadata.base_name = bn
        if not bn:
            bn = '?'
        self.regenerate_primary_file(dataset)
        if not dataset.info:
            dataset.info = 'Galaxy genotype datatype object'
        if not dataset.blurb:
            dataset.blurb = 'Composite file - Rgenetics Galaxy toolkit'
        return True
class SNPMatrix(Rgenetics):
    """
    fake class to distinguish different species of Rgenetics data collections
    """
    file_ext="snpmatrix"

    def set_peek( self, dataset, is_multi_byte=False ):
        """Set a short binary-data peek/blurb, or note that the file was purged."""
        if not dataset.dataset.purged:
            dataset.peek = "Binary RGenetics file"
            dataset.blurb = data.nice_size( dataset.get_size() )
        else:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'

    def sniff(self,filename):
        """
        need to check the file header hex code
        """
        # Fixed: read the supplied *filename* (not an undefined ``dataset``)
        # and use a valid binary mode -- the previous mode "b" is illegal.
        infile = open(filename, "rb")
        try:
            head = infile.read(16)
        finally:
            infile.close()
        # TODO: compare against a real magic number. Until then, anything with
        # content fails to sniff (matching the prior always-False behaviour
        # for non-empty files, but without crashing on hex(<str>)).
        if head != '':
            return False
        else:
            return True
class Lped(Rgenetics):
    """
    fake class to distinguish different species of Rgenetics data collections
    (linkage-format pedigree: .ped plus .map)
    """
    file_ext="lped"

    def __init__( self, **kwd ):
        Rgenetics.__init__(self, **kwd)
        # Each member file is renamed after the dataset's base_name metadata.
        for pattern, desc in ( ('%s.ped', 'Pedigree File'), ('%s.map', 'Map File') ):
            self.add_composite_file( pattern, description = desc, is_binary = True, substitute_name_with_metadata = 'base_name' )
class Pphe(Rgenetics):
    """
    fake class to distinguish different species of Rgenetics data collections
    (Plink phenotype file)
    """
    file_ext="pphe"

    def __init__( self, **kwd ):
        Rgenetics.__init__(self, **kwd)
        # Member file is renamed after the dataset's base_name metadata.
        self.add_composite_file( '%s.pphe', substitute_name_with_metadata = 'base_name', description = 'Plink Phenotype File' )
class Lmap(Rgenetics):
    """
    fake class to distinguish different species of Rgenetics data collections
    """
    # Marker datatype only: no composite members are registered here.
    file_ext="lmap"
class Fphe(Rgenetics):
    """
    fake class to distinguish different species of Rgenetics data collections
    (FBAT phenotype file)
    """
    file_ext="fphe"

    def __init__( self, **kwd ):
        Rgenetics.__init__(self, **kwd)
        # Member file is renamed after the dataset's base_name metadata.
        self.add_composite_file( '%s.fphe', substitute_name_with_metadata = 'base_name', description = 'FBAT Phenotype File' )
class Phe(Rgenetics):
    """
    fake class to distinguish different species of Rgenetics data collections
    (generic phenotype file)
    """
    file_ext="phe"

    def __init__( self, **kwd ):
        Rgenetics.__init__(self, **kwd)
        # Member file is renamed after the dataset's base_name metadata.
        self.add_composite_file( '%s.phe', substitute_name_with_metadata = 'base_name', description = 'Phenotype File' )
class Fped(Rgenetics):
    """
    fake class to distinguish different species of Rgenetics data collections
    (FBAT-format pedigree file)
    """
    file_ext="fped"

    def __init__( self, **kwd ):
        Rgenetics.__init__(self, **kwd)
        # Member file is renamed after the dataset's base_name metadata.
        self.add_composite_file( '%s.fped', substitute_name_with_metadata = 'base_name', description = 'FBAT format pedfile' )
class Pbed(Rgenetics):
    """
    fake class to distinguish different species of Rgenetics data collections
    (Plink compressed binary pedigree: .bim/.bed/.fam trio)
    """
    file_ext="pbed"

    def __init__( self, **kwd ):
        Rgenetics.__init__(self, **kwd)
        # All three members are renamed after the dataset's base_name metadata.
        for pattern in ('%s.bim', '%s.bed', '%s.fam'):
            self.add_composite_file( pattern, is_binary = True, substitute_name_with_metadata = 'base_name' )
class Eigenstratgeno(Rgenetics):
    """
    fake class to distinguish different species of Rgenetics data collections
    (Eigenstrat genotype trio: .eigenstratgeno/.ind/.map)
    """
    file_ext="eigenstratgeno"

    def __init__( self, **kwd ):
        Rgenetics.__init__(self, **kwd)
        # All three members are renamed after the dataset's base_name metadata.
        for pattern in ('%s.eigenstratgeno', '%s.ind', '%s.map'):
            self.add_composite_file( pattern, is_binary = True, substitute_name_with_metadata = 'base_name' )
class Eigenstratpca(Rgenetics):
    """
    fake class to distinguish different species of Rgenetics data collections
    (Eigenstrat PCA output)
    """
    file_ext="eigenstratpca"

    def __init__( self, **kwd ):
        Rgenetics.__init__(self, **kwd)
        # Member file is renamed after the dataset's base_name metadata.
        self.add_composite_file( '%s.eigenstratpca', substitute_name_with_metadata = 'base_name', description = 'Eigenstrat PCA file' )
class Snptest(Rgenetics):
    """
    fake class to distinguish different species of Rgenetics data collections
    """
    # Marker datatype only: no composite members are registered here.
    file_ext="snptest"
class Pheno(Tabular):
    """
    base class for pheno files
    """
    # Plain tab-separated phenotype data; inherits all behaviour from Tabular.
    file_ext = 'pheno'
class RexpBase( Html ):
"""
base class for BioC data structures in Galaxy
must be constructed with the pheno data in place since that
goes into the metadata for each instance
"""
MetadataElement( name="columns", default=0, desc="Number of columns", visible=True )
MetadataElement( name="column_names", default=[], desc="Column names", visible=True )
MetadataElement(name="pheCols",default=[],desc="Select list for potentially interesting variables",visible=True)
MetadataElement( name="base_name",
desc="base name for all transformed versions of this expression dataset", default='rexpression', set_in_upload=True)
MetadataElement( name="pheno_path", desc="Path to phenotype data for this experiment", default="rexpression.pheno", visible=True)
file_ext = 'rexpbase'
html_table = None
is_binary = True
composite_type = 'auto_primary_file'
allow_datatype_change = False
def __init__( self, **kwd ):
Html.__init__(self,**kwd)
self.add_composite_file( '%s.pheno', description = 'Phenodata tab text file',
substitute_name_with_metadata = 'base_name', is_binary=True)
def generate_primary_file( self, dataset = None ):
"""
This is called only at upload to write the html file
cannot rename the datasets here - they come with the default unfortunately
"""
return '<html><head></head><body>AutoGenerated Primary File for Composite Dataset</body></html>'
def get_phecols(self, phenolist=[], maxConc=20):
"""
sept 2009: cannot use whitespace to split - make a more complex structure here
and adjust the methods that rely on this structure
return interesting phenotype column names for an rexpression eset or affybatch
to use in array subsetting and so on. Returns a data structure for a
dynamic Galaxy select parameter.
A column with only 1 value doesn't change, so is not interesting for
analysis. A column with a different value in every row is equivalent to a unique
identifier so is also not interesting for anova or limma analysis - both these
are removed after the concordance (count of unique terms) is constructed for each
column. Then a complication - each remaining pair of columns is tested for
redundancy - if two columns are always paired, then only one is needed :)
"""
for nrows,row in enumerate(phenolist): # construct concordance
if len(row.strip()) == 0:
break
row = row.strip().split('\t')
if nrows == 0: # set up from header
head = row
totcols = len(row)
concordance = [{} for x in head] # list of dicts
else:
for col,code in enumerate(row): # keep column order correct
if col >= totcols:
log.warning('### get_phecols error in pheno file - row %d col %d (%s) longer than header %s' % (nrows, col, row, head))
else:
concordance[col].setdefault(code,0) # first one is zero
concordance[col][code] += 1
useCols = []
useConc = [] # columns of interest to keep
nrows = len(phenolist)
nrows -= 1 # drop head from count
for c,conc in enumerate(concordance): # c is column number
if (len(conc) > 1) and (len(conc) < min(nrows,maxConc)): # not all same and not all different!!
useConc.append(conc) # keep concordance
useCols.append(c) # keep column
nuse = len(useCols)
# now to check for pairs of concordant columns - drop one of these.
delme = []
p = phenolist[1:] # drop header
plist = [x.strip().split('\t') for x in p] # list of lists
phe = [[x[i] for i in useCols] for x in plist if len(x) >= totcols] # strip unused data
for i in range(0,(nuse-1)): # for each interesting column
for j in range(i+1,nuse):
kdict = {}
for row in phe: # row is a list of lists
k = '%s%s' % (row[i],row[j]) # composite key
kdict[k] = k
if (len(kdict.keys()) == len(concordance[useCols[j]])): # i and j are always matched
delme.append(j)
delme = list(set(delme)) # remove dupes
listCol = []
delme.sort()
delme.reverse() # must delete from far end!
for i in delme:
del useConc[i] # get rid of concordance
del useCols[i] # and usecols entry
for i,conc in enumerate(useConc): # these are all unique columns for the design matrix
ccounts = [(conc.get(code,0),code) for code in conc.keys()] # decorate
ccounts.sort()
cc = [(x[1],x[0]) for x in ccounts] # list of code count tuples
codeDetails = (head[useCols[i]],cc) # ('foo',[('a',3),('b',11),..])
listCol.append(codeDetails)
if len(listCol) > 0:
res = listCol
# metadata.pheCols becomes [('bar;22,zot;113','foo'), ...]
else:
res = [('no usable phenotype columns found',[('?',0),]),]
return res
def get_pheno(self,dataset):
"""
expects a .pheno file in the extra_files_dir - ugh
note that R is wierd and adds the row.name in
the header so the columns are all wrong - unless you tell it not to.
A file can be written as
write.table(file='foo.pheno',pData(foo),sep='\t',quote=F,row.names=F)
"""
p = file(dataset.metadata.pheno_path,'r').readlines()
if len(p) > 0: # should only need to fix an R pheno file once
head = p[0].strip().split('\t')
line1 = p[1].strip().split('\t')
if len(head) < len(line1):
head.insert(0,'ChipFileName') # fix R write.table b0rken-ness
p[0] = '\t'.join(head)
else:
p = []
return '\n'.join(p)
def set_peek( self, dataset, is_multi_byte=False ):
"""
expects a .pheno file in the extra_files_dir - ugh
note that R is wierd and does not include the row.name in
the header. why?
"""
if not dataset.dataset.purged:
pp = os.path.join(dataset.extra_files_path,'%s.pheno' % dataset.metadata.base_name)
try:
p = file(pp,'r').readlines()
except:
p = ['##failed to find %s' % pp,]
dataset.peek = ''.join(p[:5])
dataset.blurb = 'Galaxy Rexpression composite file'
else:
dataset.peek = 'file does not exist\n'
dataset.blurb = 'file purged from disk'
def get_peek( self, dataset ):
"""expects a .pheno file in the extra_files_dir - ugh"""
pp = os.path.join(dataset.extra_files_path,'%s.pheno' % dataset.metadata.base_name)
try:
p = file(pp,'r').readlines()
except:
p = ['##failed to find %s' % pp]
return ''.join(p[:5])
def get_file_peek(self,filename):
"""
can't really peek at a filename - need the extra_files_path and such?
"""
h = '## rexpression get_file_peek: no file found'
try:
h = file(filename,'r').readlines()
except:
pass
return ''.join(h[:5])
def regenerate_primary_file(self,dataset):
"""cannot do this until we are setting metadata
"""
bn = dataset.metadata.base_name
flist = os.listdir(dataset.extra_files_path)
rval = ['<html><head><title>Files for Composite Dataset %s</title></head><p/>Comprises the following files:<p/><ul>' % (bn)]
for i,fname in enumerate(flist):
sfname = os.path.split(fname)[-1]
rval.append( '<li><a href="%s">%s</a>' % ( sfname, sfname ) )
rval.append( '</ul></html>' )
f = file(dataset.file_name,'w')
f.write("\n".join( rval ))
f.write('\n')
f.close()
def init_meta( self, dataset, copy_from=None ):
"""Add metadata elements"""
if copy_from:
dataset.metadata = copy_from.metadata
def set_meta( self, dataset, **kwd ):
"""
NOTE we apply the tabular machinary to the phenodata extracted
from a BioC eSet or affybatch.
"""
try:
flist = os.listdir(dataset.extra_files_path)
except:
#log.debug('@@@rexpression set_meta failed - no dataset?')
return False
bn = None
for f in flist:
n = os.path.splitext(f)[0]
if not bn:
bn = n
dataset.metadata.base_name = bn
if not bn:
bn = '?'
pn = '%s.pheno' % (bn)
pp = os.path.join(dataset.extra_files_path,pn)
dataset.metadata.pheno_path=pp
try:
pf = file(pp,'r').readlines() # read the basename.phenodata in the extra_files_path
except:
pf = None
if pf:
h = pf[0].strip()
h = h.split('\t') # hope is header
h = [escape(x) for x in h]
dataset.metadata.column_names = h
dataset.metadata.columns = len(h)
dataset.peek = ''.join(pf[:5])
else:
dataset.metadata.column_names = []
dataset.metadata.columns = 0
dataset.peek = 'No pheno file found'
if len(pf) > 1:
dataset.metadata.pheCols = self.get_phecols(phenolist=pf)
else:
dataset.metadata.pheCols = [('','No useable phenotypes found',False),]
#self.regenerate_primary_file(dataset)
if not dataset.info:
dataset.info = 'Galaxy Expression datatype object'
if not dataset.blurb:
dataset.blurb = 'R loadable BioC expression object for the Rexpression Galaxy toolkit'
return True
def make_html_table( self, pp='nothing supplied from peek\n'):
"""Create HTML table, used for displaying peek"""
out = ['<table cellspacing="0" cellpadding="3">',]
p = pp.split('\n')
try:
# Generate column header
for i,row in enumerate(p):
lrow = row.strip().split('\t')
if i == 0:
orow = ['<th>%s</th>' % escape(x) for x in lrow]
orow.insert(0,'<tr>')
orow.append('</tr>')
else:
orow = ['<td>%s</td>' % escape(x) for x in lrow]
orow.insert(0,'<tr>')
orow.append('</tr>')
out.append(''.join(orow))
out.append( '</table>' )
out = "\n".join( out )
except Exception, exc:
out = "Can't create html table %s" % str( exc )
return out
def display_peek( self, dataset ):
"""Returns formatted html of peek"""
out=self.make_html_table(dataset.peek)
return out
def get_mime(self):
"""Returns the mime type of the datatype"""
return 'text/html'
class Affybatch( RexpBase ):
    """derived class for BioC data structures in Galaxy """
    file_ext = "affybatch"

    def __init__( self, **kwd ):
        RexpBase.__init__(self, **kwd)
        # The member file is renamed after the dataset's base_name metadata.
        self.add_composite_file( '%s.affybatch',
                                 substitute_name_with_metadata = 'base_name',
                                 description = 'AffyBatch R object saved to file',
                                 is_binary = True )
class Eset( RexpBase ):
    """derived class for BioC data structures in Galaxy """
    file_ext = "eset"

    def __init__( self, **kwd ):
        RexpBase.__init__(self, **kwd)
        # The member file is renamed after the dataset's base_name metadata.
        self.add_composite_file( '%s.eset',
                                 substitute_name_with_metadata = 'base_name',
                                 description = 'ESet R object saved to file',
                                 is_binary = True )
class MAlist( RexpBase ):
    """derived class for BioC data structures in Galaxy """
    file_ext = "malist"

    def __init__( self, **kwd ):
        RexpBase.__init__(self, **kwd)
        # The member file is renamed after the dataset's base_name metadata.
        self.add_composite_file( '%s.malist',
                                 substitute_name_with_metadata = 'base_name',
                                 description = 'MAlist R object saved to file',
                                 is_binary = True )
if __name__ == '__main__':
    # Run any doctests defined in this module when executed directly.
    import doctest
    import sys
    doctest.testmod(sys.modules[__name__])
| |
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
from robot.errors import DataError
from robot.output import LOGGER
from robot.utils import abspath, find_file, get_error_details, NormalizedDict
from .variables import Variables
class VariableScopes(object):
    """Stack of variable scopes: global -> suite -> test -> keyword.

    ``_scopes[-1]`` is the innermost active scope; plain reads/writes go
    there, while the ``set_global/suite/test/keyword`` methods propagate a
    value outwards through the appropriate slice of the stack.
    """

    def __init__(self, settings):
        self._global = GlobalVariables(settings)
        self._suite = None
        self._test = None
        self._scopes = [self._global]
        # Remembers variables assigned via Set Global/Suite/Test Variable so
        # they can be re-applied whenever a new scope is created.
        self._variables_set = SetVariables()

    @property
    def current(self):
        """The innermost active scope."""
        return self._scopes[-1]

    @property
    def _all_scopes(self):
        # Innermost first.
        return reversed(self._scopes)

    @property
    def _scopes_until_suite(self):
        # Yield scopes from the innermost out to (and including) the suite.
        for scope in self._all_scopes:
            yield scope
            if scope is self._suite:
                break

    @property
    def _scopes_until_test(self):
        # Yield scopes from the innermost out to (and including) the test.
        for scope in self._scopes_until_suite:
            yield scope
            if scope is self._test:
                break

    def start_suite(self):
        """Push a new suite scope seeded from the global scope."""
        self._suite = self._global.copy()
        self._scopes.append(self._suite)
        self._variables_set.start_suite()
        # Re-apply variables set by parent suites.
        self._variables_set.update(self._suite)

    def end_suite(self):
        self._scopes.pop()
        # Index 0 is the global scope, so a parent suite exists only when
        # more than one scope remains.
        self._suite = self._scopes[-1] if len(self._scopes) > 1 else None
        self._variables_set.end_suite()

    def start_test(self):
        """Push a new test scope seeded from the current suite scope."""
        self._test = self._suite.copy()
        self._scopes.append(self._test)
        self._variables_set.start_test()

    def end_test(self):
        self._scopes.pop()
        self._test = None
        self._variables_set.end_test()

    def start_keyword(self):
        # Keyword scopes are seeded from the suite scope (not the test scope)
        # and then overlaid with explicitly set variables.
        kw = self._suite.copy()
        self._variables_set.start_keyword()
        self._variables_set.update(kw)
        self._scopes.append(kw)

    def end_keyword(self):
        self._scopes.pop()
        self._variables_set.end_keyword()

    def __getitem__(self, name):
        return self.current[name]

    def __setitem__(self, name, value):
        self.current[name] = value

    def __contains__(self, name):
        return name in self.current

    def replace_list(self, items, replace_until=None, ignore_errors=False):
        return self.current.replace_list(items, replace_until, ignore_errors)

    def replace_scalar(self, items, ignore_errors=False):
        return self.current.replace_scalar(items, ignore_errors)

    def replace_string(self, string, ignore_errors=False):
        return self.current.replace_string(string, ignore_errors=ignore_errors)

    def set_from_file(self, path, args, overwrite=False):
        """Import a variable file into every scope down to the suite level."""
        variables = None
        for scope in self._scopes_until_suite:
            if variables is None:
                # First scope parses the file; its result is reused below.
                variables = scope.set_from_file(path, args, overwrite)
            else:
                scope.set_from_file(variables, overwrite=overwrite)

    def set_from_variable_table(self, variables, overwrite=False):
        for scope in self._scopes_until_suite:
            scope.set_from_variable_table(variables, overwrite)

    def resolve_delayed(self):
        for scope in self._scopes_until_suite:
            scope.resolve_delayed()

    def set_global(self, name, value):
        """Set a variable into every scope and remember it for future scopes."""
        for scope in self._all_scopes:
            name, value = self._set_global_suite_or_test(scope, name, value)
        self._variables_set.set_global(name, value)

    def _set_global_suite_or_test(self, scope, name, value):
        scope[name] = value
        # Avoid creating new list/dict objects in different scopes.
        if name[0] != '$':
            name = '$' + name[1:]
            value = scope[name]
        return name, value

    def set_suite(self, name, value, top=False, children=False):
        """Set a suite variable; ``top`` targets the top-level suite scope only."""
        if top:
            self._scopes[1][name] = value
            return
        for scope in self._scopes_until_suite:
            name, value = self._set_global_suite_or_test(scope, name, value)
        if children:
            self._variables_set.set_suite(name, value)

    def set_test(self, name, value):
        if self._test is None:
            raise DataError('Cannot set test variable when no test is started.')
        for scope in self._scopes_until_test:
            name, value = self._set_global_suite_or_test(scope, name, value)
        self._variables_set.set_test(name, value)

    def set_keyword(self, name, value):
        self.current[name] = value
        self._variables_set.set_keyword(name, value)

    def as_dict(self, decoration=True):
        return self.current.as_dict(decoration=decoration)
class GlobalVariables(Variables):
    """Global scope variables: command-line variables plus built-ins."""

    def __init__(self, settings):
        Variables.__init__(self)
        self._set_cli_variables(settings)
        self._set_built_in_variables(settings)

    def _set_cli_variables(self, settings):
        # Variable files given with --variablefile.
        for path, args in settings.variable_files:
            try:
                path = find_file(path, file_type='Variable file')
                self.set_from_file(path, args)
            # NOTE(review): bare except deliberately swallows any import
            # failure so one broken variable file does not abort the run;
            # the error is still logged below.
            except:
                msg, details = get_error_details()
                LOGGER.error(msg)
                LOGGER.info(details)
        # Individual variables given with --variable NAME:value; a missing
        # colon means an empty value.
        for varstr in settings.variables:
            try:
                name, value = varstr.split(':', 1)
            except ValueError:
                name, value = varstr, ''
            self['${%s}' % name] = value

    def _set_built_in_variables(self, settings):
        # Standard built-in variables documented in the user guide.
        for name, value in [('${TEMPDIR}', abspath(tempfile.gettempdir())),
                            ('${EXECDIR}', abspath('.')),
                            ('${/}', os.sep),
                            ('${:}', os.pathsep),
                            ('${\\n}', os.linesep),
                            ('${SPACE}', ' '),
                            ('${True}', True),
                            ('${False}', False),
                            ('${None}', None),
                            ('${null}', None),
                            ('${OUTPUT_DIR}', settings.output_directory),
                            ('${OUTPUT_FILE}', settings.output or 'NONE'),
                            ('${REPORT_FILE}', settings.report or 'NONE'),
                            ('${LOG_FILE}', settings.log or 'NONE'),
                            ('${DEBUG_FILE}', settings.debug_file or 'NONE'),
                            ('${LOG_LEVEL}', settings.log_level),
                            ('${PREV_TEST_NAME}', ''),
                            ('${PREV_TEST_STATUS}', ''),
                            ('${PREV_TEST_MESSAGE}', '')]:
            self[name] = value
class SetVariables(object):
    """Bookkeeping for variables assigned with the Set * Variable keywords.

    Mirrors the suite/test/keyword scope stack so previously set variables
    can be re-applied when a new scope starts.
    """

    def __init__(self):
        self._suite = None
        self._test = None
        self._scopes = []

    def start_suite(self):
        parent = self._scopes[-1] if self._scopes else None
        if parent is None:
            self._suite = NormalizedDict(ignore='_')
        else:
            self._suite = parent.copy()
        self._scopes.append(self._suite)

    def end_suite(self):
        self._scopes.pop()
        self._suite = self._scopes[-1] if self._scopes else None

    def start_test(self):
        self._test = self._scopes[-1].copy()
        self._scopes.append(self._test)

    def end_test(self):
        self._scopes.pop()
        self._test = None

    def start_keyword(self):
        self._scopes.append(self._scopes[-1].copy())

    def end_keyword(self):
        self._scopes.pop()

    def set_global(self, name, value):
        # A globally set variable overrides narrower scopes, so drop any
        # stale entries from every tracked scope.
        for scope in self._scopes:
            if name in scope:
                scope.pop(name)

    def set_suite(self, name, value):
        self._suite[name] = value

    def set_test(self, name, value):
        # Write into every scope from the innermost out to the test scope.
        for scope in reversed(self._scopes):
            scope[name] = value
            if scope is self._test:
                break

    def set_keyword(self, name, value):
        self._scopes[-1][name] = value

    def update(self, variables):
        current = self._scopes[-1]
        for name, value in current.items():
            variables[name] = value
| |
import os
import sys
from django.contrib.messages import constants as messages
from geotrek import __version__
from . import PROJECT_ROOT_PATH
def gettext_noop(s):
    """Mark *s* for later translation; return it unchanged at runtime."""
    return s
# Core switches. DEBUG is off by default; deployments override it.
DEBUG = False
TEMPLATE_DEBUG = DEBUG
# True when running under `manage.py test`.
TEST = 'test' in sys.argv
VERSION = __version__
ADMINS = (
    ('Makina Corpus', 'geobi@makina-corpus.com'),
)
MANAGERS = ADMINS
# Connection details (NAME/USER/...) are filled in by instance settings.
DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.postgis',
        'OPTIONS': {},
        'NAME': '', # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
#
# PostgreSQL Schemas for apps and models.
#
# Caution: editing this setting might not be enough.
# Indeed, it won't apply to apps not managed by South, nor database views and functions.
# See all sql/*-schemas.sql files in each Geotrek app.
#
DATABASE_SCHEMAS = {
    'default': 'geotrek',
    'auth': 'django',
    'django': 'django',
    'easy_thumbnails': 'django',
    'south': 'django',
    'feedback': 'gestion',
    'infrastructure': 'gestion',
    'maintenance': 'gestion',
    'tourism': 'tourisme',
    'trekking': 'rando',
    'zoning': 'zonage',
    'land': 'foncier',
}
# Put every schema on the search path so unqualified table names resolve.
DATABASES['default']['OPTIONS'] = {
    'options': '-c search_path=public,%s' % ','.join(set(DATABASE_SCHEMAS.values()))
}
#
# Authentication
#
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
AUTH_PROFILE_MODULE = 'authent.UserProfile'
# Settings required for geotrek.authent.backend.DatabaseBackend :
AUTHENT_DATABASE = None
AUTHENT_TABLENAME = None
# Maps role names to the group ids used by the external authent database.
AUTHENT_GROUPS_MAPPING = {
    'PATH_MANAGER': 1,
    'TREKKING_MANAGER': 2,
    'EDITOR': 3,
    'READER': 4,
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Paris'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'fr'
MODELTRANSLATION_DEFAULT_LANGUAGE = LANGUAGE_CODE
LANGUAGES = (
    ('en', gettext_noop('English')),
    ('fr', gettext_noop('French')),
    ('it', gettext_noop('Italian')),
    ('es', gettext_noop('Spanish')),
)
LOCALE_PATHS = (
    os.path.join(PROJECT_ROOT_PATH, 'locale'),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Dates are entered day-first (French convention).
DATE_INPUT_FORMATS = ('%d/%m/%Y',)
ROOT_URL = ''
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
LOGIN_REDIRECT_URL = 'home'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT_PATH, 'media')
UPLOAD_DIR = 'upload' # media root subdir
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
MEDIA_URL_SECURE = '/media_secure/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    os.path.join(PROJECT_ROOT_PATH, 'static'),
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
    'compressor.finders.CompressorFinder',
)
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
COMPRESSOR_ENABLED = False
COMPRESS_PARSER = 'compressor.parser.HtmlParser'
# Make this unique, and don't share it with anybody.
# NOTE(review): placeholder value — must be overridden per instance.
SECRET_KEY = 'public_key'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'geotrek.authent.middleware.LocaleForcedMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'geotrek.common.middleware.APILocaleMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'geotrek.authent.middleware.CorsMiddleware',
    'mapentity.middleware.AutoLoginMiddleware'
)
ROOT_URLCONF = 'geotrek.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'geotrek.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.core.context_processors.tz',
    'django.core.context_processors.request',
    'django.contrib.messages.context_processors.messages',
    'mapentity.context_processors.settings',
)
#
# /!\ Application names (last levels) must be unique
# (c.f. auth/authent)
# https://code.djangoproject.com/ticket/12288
#
PROJECT_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'django.contrib.admindocs',
    'django.contrib.gis',
)
# Do not migrate translated fields, they differ per instance, and
# can be added/removed using `update_translation_fields`
if 'schemamigration' not in sys.argv:
    PROJECT_APPS += ('modeltranslation',)
# Third-party apps.
PROJECT_APPS += (
    'south',
    'leaflet',
    'floppyforms',
    'crispy_forms',
    'compressor',
    'djgeojson',
    'tinymce',
    'easy_thumbnails',
    'shapes',
    'paperclip',
    'mapentity',
    'rest_framework',
    'embed_video',
    'djcelery',
)
# Geotrek's own apps come last so their templates/statics can be overridden.
INSTALLED_APPS = PROJECT_APPS + (
    'geotrek.cirkwi',
    'geotrek.authent',
    'geotrek.common',
    'geotrek.altimetry',
    'geotrek.core',
    'geotrek.infrastructure',
    'geotrek.maintenance',
    'geotrek.zoning',
    'geotrek.land',
    'geotrek.trekking',
    'geotrek.tourism',
    'geotrek.flatpages',
    'geotrek.feedback',
)
SERIALIZATION_MODULES = {
    'geojson': 'djgeojson.serializers'
}
# Dummy caches by default; instances plug in a real backend.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
    },
    # The fat backend is used to store big chunk of data (>1 Mo)
    'fat': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
    }
}
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'formatters': {
        'simple': {
            'format': '%(levelname)s %(asctime)s %(name)s %(message)s'
        },
    },
    'handlers': {
        # NOTE: despite its name, this handler discards records
        # (logging.NullHandler) — no mail is actually sent here.
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'logging.NullHandler'
        },
        'console': {
            'level': 'WARNING',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
    },
    'loggers': {
        'django.db.backends': {
            'handlers': ['console', 'mail_admins'],
            'level': 'ERROR',
            'propagate': False,
        },
        'django.request': {
            'handlers': ['console', 'mail_admins'],
            'level': 'ERROR',
            'propagate': False,
        },
        'django': {
            'handlers': ['console', 'mail_admins'],
            'level': 'ERROR',
            'propagate': False,
        },
        'south': {
            'handlers': ['console', 'mail_admins'],
            'level': 'ERROR',
            'propagate': False,
        },
        'geotrek': {
            'handlers': ['console', 'mail_admins'],
            'level': 'INFO',
            'propagate': False,
        },
        'mapentity': {
            'handlers': ['console', 'mail_admins'],
            'level': 'INFO',
            'propagate': False,
        },
        '': {
            'handlers': ['console', 'mail_admins'],
            'level': 'INFO',
            'propagate': False,
        },
    }
}
# easy_thumbnails presets.
THUMBNAIL_ALIASES = {
    '': {
        'thumbnail': {'size': (150, 150)},
        # Thumbnails for public trek website
        'small-square': {'size': (120, 120), 'crop': True},
        'medium': {'size': (800, 800)},
        # Header image for trek export (keep ratio of TREK_EXPORT_HEADER_IMAGE_SIZE)
        'print': {'size': (1000, 500), 'crop': 'smart'},
    },
}
# Attachment handling (paperclip app).
PAPERCLIP_CONFIG = {
    'ENABLE_VIDEO': True,
    'FILETYPE_MODEL': 'common.FileType',
    'ATTACHMENT_TABLE_NAME': 'fl_t_fichier',
}
# Data projection
SRID = 3857
# API projection (client-side), can differ from SRID (database). Leaflet requires 4326.
API_SRID = 4326
# Extent in native projection (Toulouse area)
SPATIAL_EXTENT = (144968, 5415668, 175412, 5388753)
MAPENTITY_CONFIG = {
'TITLE': gettext_noop("Geotrek"),
'TEMP_DIR': '/tmp',
'HISTORY_ITEMS_MAX': 7,
'CONVERSION_SERVER': 'http://127.0.0.1:6543',
'CAPTURE_SERVER': 'http://127.0.0.1:8001',
'ROOT_URL': ROOT_URL,
'MAP_BACKGROUND_FOGGED': True,
'GEOJSON_LAYERS_CACHE_BACKEND': 'fat',
'SENDFILE_HTTP_HEADER': 'X-Accel-Redirect',
'DRF_API_URL_PREFIX': r'^api/(?P<lang>\w+)/',
}
DEFAULT_STRUCTURE_NAME = gettext_noop('Default')
VIEWPORT_MARGIN = 0.1 # On list page, around spatial extent from settings.ini
PATHS_LINE_MARKER = 'dotL'
PATH_SNAPPING_DISTANCE = 1 # Distance of path snapping in meters
SNAP_DISTANCE = 30 # Distance of snapping in pixels
ALTIMETRIC_PROFILE_PRECISION = 25 # Sampling precision in meters
ALTIMETRIC_PROFILE_BACKGROUND = 'white'
ALTIMETRIC_PROFILE_COLOR = '#F77E00'
ALTIMETRIC_PROFILE_HEIGHT = 400
ALTIMETRIC_PROFILE_WIDTH = 800
ALTIMETRIC_PROFILE_FONTSIZE = 25
ALTIMETRIC_PROFILE_FONT = 'ubuntu'
ALTIMETRIC_PROFILE_MIN_YSCALE = 1200 # Minimum y scale (in meters)
ALTIMETRIC_AREA_MAX_RESOLUTION = 150 # Maximum number of points (by width/height)
ALTIMETRIC_AREA_MARGIN = 0.15
# Let this be defined at instance-level
LEAFLET_CONFIG = {
'SRID': SRID,
'TILES': [
('Scan', 'http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',),
('Ortho', 'http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.jpg'),
],
'TILES_EXTENT': SPATIAL_EXTENT,
# Extent in API projection (Leaflet view default extent)
'SPATIAL_EXTENT': (1.3, 43.7, 1.5, 43.5),
'NO_GLOBALS': False,
'PLUGINS': {
'geotrek': {'js': ['core/leaflet.lineextremities.js',
'core/leaflet.textpath.js',
'trekking/points_reference.js',
'trekking/parking_location.js']},
'topofields': {'js': ['core/geotrek.forms.snap.js',
'core/geotrek.forms.topology.js',
'core/dijkstra.js',
'core/multipath.js',
'core/topology_helper.js']}
}
}
""" This *pool* of colors is used to colorized lands records.
"""
COLORS_POOL = {'land': ['#f37e79', '#7998f3', '#bbf379', '#f379df', '#f3bf79', '#9c79f3', '#7af379'],
'physical': ['#f3799d', '#79c1f3', '#e4f379', '#de79f3', '#79f3ba', '#f39779', '#797ff3'],
'competence': ['#a2f379', '#f379c6', '#79e9f3', '#f3d979', '#b579f3', '#79f392', '#f37984'],
'signagemanagement': ['#79a8f3', '#cbf379', '#f379ee', '#79f3e3', '#79f3d3'],
'workmanagement': ['#79a8f3', '#cbf379', '#f379ee', '#79f3e3', '#79f3d3'],
'restrictedarea': ['plum', 'violet', 'deeppink', 'orchid',
'darkviolet', 'lightcoral', 'palevioletred',
'MediumVioletRed', 'MediumOrchid', 'Magenta',
'LightSalmon', 'HotPink', 'Fuchsia']}
MAP_STYLES = {
'path': {'weight': 2, 'opacity': 1.0, 'color': '#FF4800'},
'city': {'weight': 4, 'color': 'orange', 'opacity': 0.3, 'fillOpacity': 0.0},
'district': {'weight': 6, 'color': 'orange', 'opacity': 0.3, 'fillOpacity': 0.0, 'dashArray': '12, 12'},
'restrictedarea': {'weight': 2, 'color': 'red', 'opacity': 0.5, 'fillOpacity': 0.5},
'land': {'weight': 4, 'color': 'red', 'opacity': 1.0},
'physical': {'weight': 6, 'color': 'red', 'opacity': 1.0},
'competence': {'weight': 4, 'color': 'red', 'opacity': 1.0},
'workmanagement': {'weight': 4, 'color': 'red', 'opacity': 1.0},
'signagemanagement': {'weight': 5, 'color': 'red', 'opacity': 1.0},
'print': {
'path': {'weight': 1},
'trek': {'color': '#FF3300', 'weight': 7, 'opacity': 0.5,
'arrowColor': 'black', 'arrowSize': 10},
}
}
LAYER_PRECISION_LAND = 4 # Number of fraction digit
LAYER_SIMPLIFY_LAND = 10 # Simplification tolerance
LAND_BBOX_CITIES_ENABLED = True
LAND_BBOX_DISTRICTS_ENABLED = True
LAND_BBOX_AREAS_ENABLED = False
PUBLISHED_BY_LANG = True
# Sizes in centimeters for exported map/header images (per entity kind).
EXPORT_MAP_IMAGE_SIZE = {
    'trek': (14.1, 11),
    'poi': (14.1, 11),
    'touristiccontent': (14.1, 11),
    'touristicevent': (14.1, 11),
}
EXPORT_HEADER_IMAGE_SIZE = {
    'trek': (10.7, 5.35), # Keep ratio of THUMBNAIL_ALIASES['print']
    'poi': (10.7, 5.35), # Keep ratio of THUMBNAIL_ALIASES['print']
    'touristiccontent': (10.7, 5.35), # Keep ratio of THUMBNAIL_ALIASES['print']
    'touristicevent': (10.7, 5.35), # Keep ratio of THUMBNAIL_ALIASES['print']
}
COMPLETENESS_FIELDS = {
    'trek': ['departure', 'duration', 'difficulty', 'description_teaser']
}
# Feature flags.
TRAIL_MODEL_ENABLED = True
TREKKING_TOPOLOGY_ENABLED = True
FLATPAGES_ENABLED = False # False because still experimental
TOURISM_ENABLED = False # False because still experimental
TREK_POI_INTERSECTION_MARGIN = 500 # meters (used only if TREKKING_TOPOLOGY_ENABLED = False)
TOURISM_INTERSECTION_MARGIN = 500 # meters (always used)
SIGNAGE_LINE_ENABLED = False
TREK_POINTS_OF_REFERENCE_ENABLED = True
TREK_EXPORT_POI_LIST_LIMIT = 14
TREK_EXPORT_INFORMATION_DESK_LIST_LIMIT = 2
TREK_DAY_DURATION = 10 # Max duration to be done in one day
TREK_ICON_SIZE_POI = 18
TREK_ICON_SIZE_PARKING = 18
TREK_ICON_SIZE_INFORMATION_DESK = 18
# Static offsets in projection units
TOPOLOGY_STATIC_OFFSETS = {'land': -5,
                           'physical': 0,
                           'competence': 5,
                           'signagemanagement': -10,
                           'workmanagement': 10}
# Map Django message levels to Bootstrap alert CSS classes.
MESSAGE_TAGS = {
    messages.SUCCESS: 'alert-success',
    messages.INFO: 'alert-info',
    messages.DEBUG: 'alert-info',
    messages.WARNING: 'alert-error',
    messages.ERROR: 'alert-error',
}
CACHE_TIMEOUT_LAND_LAYERS = 60 * 60 * 24
CACHE_TIMEOUT_TOURISM_DATASOURCES = 60 * 60 * 24
TREK_CATEGORY_ORDER = None
TOURISTIC_EVENT_CATEGORY_ORDER = None
SPLIT_TREKS_CATEGORIES_BY_PRACTICE = False
SPLIT_TREKS_CATEGORIES_BY_ACCESSIBILITY = False
HIDE_PUBLISHED_TREKS_IN_TOPOLOGIES = False
ZIP_TOURISTIC_CONTENTS_AS_POI = False
CRISPY_ALLOWED_TEMPLATE_PACKS = ('bootstrap', 'bootstrap3')
CRISPY_TEMPLATE_PACK = 'bootstrap'
# Mobile app_directories
MOBILE_TILES_URL = 'http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png'
MOBILE_TILES_RADIUS_LARGE = 0.01 # ~1 km
MOBILE_TILES_RADIUS_SMALL = 0.005 # ~500 m
MOBILE_TILES_GLOBAL_ZOOMS = range(13)
MOBILE_TILES_LOW_ZOOMS = range(13, 15)
MOBILE_TILES_HIGH_ZOOMS = range(15, 17)
# Celery task queue configuration (results stored in the Django database).
import djcelery
djcelery.setup_loader()
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
BROKER_URL = 'redis://127.0.0.1:6379/0'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TASK_RESULT_EXPIRES = 5
TEST_RUNNER = 'djcelery.contrib.test_runner.CeleryTestSuiteRunner'
| |
# -*- coding: utf-8 -*-
""" Handle most tasks related to DynamoDB interaction """
import re
import sys
import time
import datetime
from boto import dynamodb2
from boto.dynamodb2.table import Table
from boto.exception import DynamoDBResponseError, JSONResponseError
from dynamic_dynamodb.log_handler import LOGGER as logger
from dynamic_dynamodb.config_handler import (
get_configured_tables,
get_global_option,
get_gsi_option,
get_table_option)
from dynamic_dynamodb.aws import sns
def get_tables_and_gsis():
    """ Get a set of tables and gsis and their configuration keys

    Every configured table name is treated as a regular expression and
    matched against the tables that actually exist in DynamoDB. Exits the
    process on an invalid regular expression.

    :returns: list -- Sorted list of (table_name, table_conf_key) tuples
    """
    table_names = set()
    configured_tables = get_configured_tables()
    # Config keys that matched no existing table; reported at the end.
    not_used_tables = set(configured_tables)
    # Add regexp table names
    for table_instance in list_tables():
        for key_name in configured_tables:
            try:
                if re.match(key_name, table_instance.table_name):
                    logger.debug("Table {0} match with config key {1}".format(
                        table_instance.table_name, key_name))
                    # Notify users about regexps that match multiple tables
                    for key, value in table_names:
                        if key == table_instance.table_name:
                            logger.warning(
                                'Table {0} matches multiple regexps in '
                                'the configuration'.format(
                                    table_instance.table_name))
                    table_names.add(
                        (
                            table_instance.table_name,
                            key_name
                        ))
                    not_used_tables.discard(key_name)
                else:
                    logger.debug(
                        "Table {0} did not match with config key {1}".format(
                            table_instance.table_name, key_name))
            except re.error:
                # A broken pattern makes the whole configuration unusable.
                logger.error('Invalid regular expression: "{0}"'.format(
                    key_name))
                sys.exit(1)
    if not_used_tables:
        logger.warning(
            'No tables matching the following configured '
            'tables found: {0}'.format(', '.join(not_used_tables)))
    return sorted(table_names)
def get_table(table_name):
    """ Fetch a handle for the given DynamoDB table.

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :returns: boto.dynamodb.table.Table
    """
    try:
        return Table(table_name, connection=DYNAMODB_CONNECTION)
    except DynamoDBResponseError as error:
        # Error type is the last component of e.g. 'com.amazon...#ResourceNotFoundException'
        error_type = error.body['__type'].rsplit('#', 1)[1]
        if error_type == 'ResourceNotFoundException':
            logger.error(
                '{0} - Table {1} not found'.format(table_name, table_name))
        raise
def get_gsi_status(table_name, gsi_name):
    """ Return the status of the given GSI (None if not found).

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :returns: str
    """
    try:
        desc = DYNAMODB_CONNECTION.describe_table(table_name)
    except JSONResponseError:
        raise
    statuses = (gsi[u'IndexStatus']
                for gsi in desc[u'Table'][u'GlobalSecondaryIndexes']
                if gsi[u'IndexName'] == gsi_name)
    return next(statuses, None)
def get_provisioned_gsi_read_units(table_name, gsi_name):
    """ Returns the number of provisioned read units for the GSI

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :returns: int -- Number of read units
    :raises: ValueError -- If the GSI does not exist on the table
    """
    try:
        desc = DYNAMODB_CONNECTION.describe_table(table_name)
    except JSONResponseError:
        # Communication/validation errors propagate to the caller
        raise
    read_units = None
    # .get() avoids a KeyError on tables that have no GSIs at all
    for gsi in desc[u'Table'].get(u'GlobalSecondaryIndexes', []):
        if gsi[u'IndexName'] == gsi_name:
            read_units = int(
                gsi[u'ProvisionedThroughput'][u'ReadCapacityUnits'])
            break
    if read_units is None:
        # Previously this fell through to an UnboundLocalError when the
        # GSI was missing; fail with a clear message instead.
        raise ValueError(
            '{0} - GSI {1} not found in table description'.format(
                table_name, gsi_name))
    logger.debug(
        '{0} - GSI: {1} - Currently provisioned read units: {2:d}'.format(
            table_name, gsi_name, read_units))
    return read_units
def get_provisioned_gsi_write_units(table_name, gsi_name):
    """ Returns the number of provisioned write units for the GSI

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :returns: int -- Number of write units
    :raises: ValueError -- If the GSI does not exist on the table
    """
    try:
        desc = DYNAMODB_CONNECTION.describe_table(table_name)
    except JSONResponseError:
        # Communication/validation errors propagate to the caller
        raise
    write_units = None
    # .get() avoids a KeyError on tables that have no GSIs at all
    for gsi in desc[u'Table'].get(u'GlobalSecondaryIndexes', []):
        if gsi[u'IndexName'] == gsi_name:
            write_units = int(
                gsi[u'ProvisionedThroughput'][u'WriteCapacityUnits'])
            break
    if write_units is None:
        # Previously this fell through to an UnboundLocalError when the
        # GSI was missing; fail with a clear message instead.
        raise ValueError(
            '{0} - GSI {1} not found in table description'.format(
                table_name, gsi_name))
    logger.debug(
        '{0} - GSI: {1} - Currently provisioned write units: {2:d}'.format(
            table_name, gsi_name, write_units))
    return write_units
def get_provisioned_table_read_units(table_name):
    """ Return the table's currently provisioned read capacity units.

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :returns: int -- Number of read units
    """
    try:
        desc = DYNAMODB_CONNECTION.describe_table(table_name)
    except JSONResponseError:
        raise
    throughput = desc[u'Table'][u'ProvisionedThroughput']
    read_units = int(throughput[u'ReadCapacityUnits'])
    logger.debug('{0} - Currently provisioned read units: {1:d}'.format(
        table_name, read_units))
    return read_units
def get_provisioned_table_write_units(table_name):
    """ Return the table's currently provisioned write capacity units.

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :returns: int -- Number of write units
    """
    try:
        desc = DYNAMODB_CONNECTION.describe_table(table_name)
    except JSONResponseError:
        raise
    throughput = desc[u'Table'][u'ProvisionedThroughput']
    write_units = int(throughput[u'WriteCapacityUnits'])
    logger.debug('{0} - Currently provisioned write units: {1:d}'.format(
        table_name, write_units))
    return write_units
def get_table_status(table_name):
    """ Return the current status string of a DynamoDB table.

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :returns: str
    """
    try:
        description = DYNAMODB_CONNECTION.describe_table(table_name)
    except JSONResponseError:
        raise
    return description[u'Table'][u'TableStatus']
def list_tables():
    """ Return list of DynamoDB tables available from AWS

    Follows DynamoDB's pagination via LastEvaluatedTableName. On known
    access/authentication errors an empty (or partial) list is returned;
    communication errors abort the process.

    :returns: list -- List of DynamoDB tables
    """
    tables = []
    try:
        table_list = DYNAMODB_CONNECTION.list_tables()
        while True:
            for table_name in table_list[u'TableNames']:
                tables.append(get_table(table_name))
            # Keep paginating until DynamoDB stops returning a continuation key
            if u'LastEvaluatedTableName' in table_list:
                table_list = DYNAMODB_CONNECTION.list_tables(
                    table_list[u'LastEvaluatedTableName'])
            else:
                break
    except DynamoDBResponseError as error:
        # Error type is the last component of the '__type' identifier
        dynamodb_error = error.body['__type'].rsplit('#', 1)[1]
        if dynamodb_error == 'ResourceNotFoundException':
            logger.error('No tables found')
        elif dynamodb_error == 'AccessDeniedException':
            logger.debug(
                'Your AWS API keys lack access to listing tables. '
                'That is an issue if you are trying to use regular '
                'expressions in your table configuration.')
        elif dynamodb_error == 'UnrecognizedClientException':
            logger.error(
                'Invalid security token. Are your AWS API keys correct?')
        else:
            logger.error(
                (
                    'Unhandled exception: {0}: {1}. '
                    'Please file a bug report at '
                    'https://github.com/sebdah/dynamic-dynamodb/issues'
                ).format(
                    dynamodb_error,
                    error.body['message']))
    except JSONResponseError as error:
        logger.error('Communication error: {0}'.format(error))
        sys.exit(1)
    return tables
def update_table_provisioning(
        table_name, key_name, reads, writes, retry_with_only_increase=False):
    """ Update provisioning for a given table

    Honors maintenance windows (outside a window only up-scaling is
    performed) and the global ``dry_run`` option. On a
    LimitExceededException the call retries itself once with
    ``retry_with_only_increase=True``.

    :type table_name: str
    :param table_name: Name of the table
    :type key_name: str
    :param key_name: Configuration option key name
    :type reads: int
    :param reads: New number of provisioned read units
    :type writes: int
    :param writes: New number of provisioned write units
    :type retry_with_only_increase: bool
    :param retry_with_only_increase: Set to True to ensure only increases
    """
    table = get_table(table_name)
    current_reads = int(get_provisioned_table_read_units(table_name))
    current_writes = int(get_provisioned_table_write_units(table_name))
    if retry_with_only_increase:
        # Ensure that we are only doing increases
        if current_reads > reads:
            reads = current_reads
        if current_writes > writes:
            writes = current_writes
        # Return if we do not need to scale at all
        if reads == current_reads and writes == current_writes:
            logger.info(
                '{0} - No need to scale up reads nor writes'.format(
                    table_name))
            return
        logger.info(
            '{0} - Retrying to update provisioning, excluding any decreases. '
            'Setting new reads to {1} and new writes to {2}'.format(
                table_name, reads, writes))
    # Check that we are in the right time frame
    maintenance_windows = get_table_option(key_name, 'maintenance_windows')
    if maintenance_windows:
        if not __is_table_maintenance_window(table_name, maintenance_windows):
            logger.warning(
                '{0} - We are outside a maintenace window. '
                'Will only perform up scaling activites'.format(table_name))
            # Ensure that we are only doing increases
            if current_reads > reads:
                reads = current_reads
            if current_writes > writes:
                writes = current_writes
            # Return if we do not need to scale up
            if reads == current_reads and writes == current_writes:
                logger.info(
                    '{0} - No need to scale up reads nor writes'.format(
                        table_name))
                return
        else:
            logger.info(
                '{0} - Current time is within maintenance window'.format(
                    table_name))
    logger.info(
        '{0} - Updating provisioning to {1} reads and {2} writes'.format(
            table_name, reads, writes))
    # Return if dry-run
    if get_global_option('dry_run'):
        return
    try:
        table.update(
            throughput={
                'read': reads,
                'write': writes
            })
        # See if we should send notifications for scale-down, scale-up or both
        sns_message_types = []
        if current_reads > reads or current_writes > writes:
            sns_message_types.append('scale-down')
        if current_reads < reads or current_writes < writes:
            sns_message_types.append('scale-up')
        message = []
        if current_reads > reads:
            message.append('{0} - Reads: DOWN from {1} to {2}\n'.format(
                table_name, current_reads, reads))
        elif current_reads < reads:
            message.append('{0} - Reads: UP from {1} to {2}\n'.format(
                table_name, current_reads, reads))
        if current_writes > writes:
            message.append('{0} - Writes: DOWN from {1} to {2}\n'.format(
                table_name, current_writes, writes))
        elif current_writes < writes:
            message.append('{0} - Writes: UP from {1} to {2}\n'.format(
                table_name, current_writes, writes))
        sns.publish_table_notification(
            key_name,
            ''.join(message),
            sns_message_types,
            subject='Updated provisioning for table {0}'.format(table_name))
    except JSONResponseError as error:
        exception = error.body['__type'].split('#')[1]
        know_exceptions = [
            'LimitExceededException',
            'ValidationException',
            'ResourceInUseException']
        if exception in know_exceptions:
            logger.warning('{0} - {1}: {2}'.format(
                table_name, exception, error.body['message']))
        else:
            if 'message' in error.body:
                msg = error.body['message']
            else:
                msg = error
            logger.error(
                (
                    '{0} - Unhandled exception: {1}: {2}. '
                    'Please file a bug report at '
                    'https://github.com/sebdah/dynamic-dynamodb/issues'
                ).format(table_name, exception, msg))
        # Single-level retry: only increases, never recurses more than once
        if (not retry_with_only_increase and
                exception == 'LimitExceededException'):
            logger.info(
                '{0} - Will retry to update provisioning '
                'with only increases'.format(table_name))
            update_table_provisioning(
                table_name,
                key_name,
                reads,
                writes,
                retry_with_only_increase=True)
def update_gsi_provisioning(
        table_name, table_key, gsi_name, gsi_key,
        reads, writes, retry_with_only_increase=False):
    """ Update provisioning on a global secondary index

    Mirrors ``update_table_provisioning``: honors maintenance windows
    (outside a window only up-scaling is performed), the global
    ``dry_run`` option, and retries once with only increases on a
    LimitExceededException.

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type table_key: str
    :param table_key: Table configuration option key name
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :type gsi_key: str
    :param gsi_key: GSI configuration option key name
    :type reads: int
    :param reads: Number of reads to provision
    :type writes: int
    :param writes: Number of writes to provision
    :type retry_with_only_increase: bool
    :param retry_with_only_increase: Set to True to ensure only increases
    """
    current_reads = int(get_provisioned_gsi_read_units(table_name, gsi_name))
    current_writes = int(get_provisioned_gsi_write_units(table_name, gsi_name))
    if retry_with_only_increase:
        # Ensure that we are only doing increases
        if current_reads > reads:
            reads = current_reads
        if current_writes > writes:
            writes = current_writes
        # Return if we do not need to scale at all
        if reads == current_reads and writes == current_writes:
            logger.info(
                '{0} - GSI: {1} - No need to scale up reads nor writes'.format(
                    table_name, gsi_name))
            return
        logger.info(
            '{0} - GSI: {1} - Retrying to update provisioning, '
            'excluding any decreases. '
            'Setting new reads to {2} and new writes to {3}'.format(
                table_name, gsi_name, reads, writes))
    # Check that we are in the right time frame
    m_windows = get_gsi_option(table_key, gsi_key, 'maintenance_windows')
    if m_windows:
        if not __is_gsi_maintenance_window(table_name, gsi_name, m_windows):
            logger.warning(
                '{0} - GSI: {1} - We are outside a maintenace window. '
                'Will only perform up scaling activites'.format(
                    table_name,
                    gsi_name))
            # Ensure that we are only doing increases
            if current_reads > reads:
                reads = current_reads
            if current_writes > writes:
                writes = current_writes
            # Return if we do not need to scale up
            if reads == current_reads and writes == current_writes:
                logger.info(
                    '{0} - GSI: {1} - '
                    'No need to scale up reads nor writes'.format(
                        table_name,
                        gsi_name))
                return
        else:
            logger.info(
                '{0} - GSI: {1} - '
                'Current time is within maintenance window'.format(
                    table_name,
                    gsi_name))
    logger.info(
        '{0} - GSI: {1} - '
        'Updating provisioning to {2} reads and {3} writes'.format(
            table_name, gsi_name, reads, writes))
    # Return if dry-run
    if get_global_option('dry_run'):
        return
    try:
        DYNAMODB_CONNECTION.update_table(
            table_name=table_name,
            global_secondary_index_updates=[
                {
                    "Update": {
                        "IndexName": gsi_name,
                        "ProvisionedThroughput": {
                            "ReadCapacityUnits": reads,
                            "WriteCapacityUnits": writes
                        }
                    }
                }
            ])
        message = []
        if current_reads > reads:
            message.append(
                '{0} - GSI: {1} - Reads: DOWN from {2} to {3}\n'.format(
                    table_name, gsi_name, current_reads, reads))
        elif current_reads < reads:
            message.append(
                '{0} - GSI: {1} - Reads: UP from {2} to {3}\n'.format(
                    table_name, gsi_name, current_reads, reads))
        if current_writes > writes:
            message.append(
                '{0} - GSI: {1} - Writes: DOWN from {2} to {3}\n'.format(
                    table_name, gsi_name, current_writes, writes))
        elif current_writes < writes:
            message.append(
                '{0} - GSI: {1} - Writes: UP from {2} to {3}\n'.format(
                    table_name, gsi_name, current_writes, writes))
        # See if we should send notifications for scale-down, scale-up or both
        sns_message_types = []
        if current_reads > reads or current_writes > writes:
            sns_message_types.append('scale-down')
        if current_reads < reads or current_writes < writes:
            sns_message_types.append('scale-up')
        sns.publish_gsi_notification(
            table_key,
            gsi_key,
            ''.join(message),
            sns_message_types,
            subject='Updated provisioning for GSI {0}'.format(gsi_name))
    except JSONResponseError as error:
        exception = error.body['__type'].split('#')[1]
        know_exceptions = ['LimitExceededException']
        if exception in know_exceptions:
            logger.warning('{0} - GSI: {1} - {2}: {3}'.format(
                table_name, gsi_name, exception, error.body['message']))
        else:
            logger.error(
                (
                    '{0} - GSI: {1} - Unhandled exception: {2}: {3}. '
                    'Please file a bug report at '
                    'https://github.com/sebdah/dynamic-dynamodb/issues'
                ).format(
                    table_name, gsi_name, exception, error.body['message']))
        # Single-level retry: only increases, never recurses more than once
        if (not retry_with_only_increase and
                exception == 'LimitExceededException'):
            logger.info(
                '{0} - GSI: {1} - Will retry to update provisioning '
                'with only increases'.format(table_name, gsi_name))
            update_gsi_provisioning(
                table_name,
                table_key,
                gsi_name,
                gsi_key,
                reads,
                writes,
                retry_with_only_increase=True)
def table_gsis(table_name):
    """ Return the list of GSI descriptions for the given table.

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :returns: list -- GSI descriptions (empty list if the table has none)
    """
    try:
        table_desc = DYNAMODB_CONNECTION.describe_table(table_name)[u'Table']
    except JSONResponseError:
        raise
    return table_desc.get(u'GlobalSecondaryIndexes', [])
def __get_connection_dynamodb(retries=3):
    """ Ensure connection to DynamoDB

    :type retries: int
    :param retries: Number of times to retry to connect to DynamoDB
    :returns: boto dynamodb2 connection
    :raises: RuntimeError -- If no connection could be established
    """
    region = get_global_option('region')
    while True:
        if (get_global_option('aws_access_key_id') and
                get_global_option('aws_secret_access_key')):
            logger.debug(
                'Authenticating to DynamoDB using '
                'credentials in configuration file')
            connection = dynamodb2.connect_to_region(
                region,
                aws_access_key_id=get_global_option('aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            logger.debug(
                'Authenticating using boto\'s authentication handler')
            connection = dynamodb2.connect_to_region(region)
        if connection:
            logger.debug('Connected to DynamoDB in {0}'.format(region))
            return connection
        if retries == 0:
            logger.error('Failed to connect to DynamoDB. Giving up.')
            # A bare `raise` here previously blew up with
            # "No active exception to re-raise" — raise explicitly instead.
            raise RuntimeError(
                'Failed to connect to DynamoDB in region {0}'.format(region))
        logger.error(
            'Failed to connect to DynamoDB. Retrying in 5 seconds')
        retries -= 1
        time.sleep(5)
def __is_gsi_maintenance_window(table_name, gsi_name, maintenance_windows):
    """ Checks that the current time is within the maintenance window

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :type maintenance_windows: str
    :param maintenance_windows: Example: '00:00-01:00,10:00-11:00'
    :returns: bool -- True if within maintenance window
    """
    # Parse every comma separated window into a (start, end) pair first;
    # a single malformed entry rejects the whole specification.
    windows = []
    for spec in maintenance_windows.split(','):
        pieces = spec.split('-', 1)
        if len(pieces) != 2:
            logger.error(
                '{0} - GSI: {1} - '
                'Malformatted maintenance window'.format(table_name, gsi_name))
            return False
        windows.append(pieces)

    # 'HHMM' strings compare lexicographically the same way the
    # underlying times compare chronologically.
    current = datetime.datetime.utcnow().strftime('%H%M')
    for begin, finish in windows:
        if begin.replace(':', '') <= current <= finish.replace(':', ''):
            return True

    return False
def __is_table_maintenance_window(table_name, maintenance_windows):
    """ Checks that the current time is within the maintenance window

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type maintenance_windows: str
    :param maintenance_windows: Example: '00:00-01:00,10:00-11:00'
    :returns: bool -- True if within maintenance window
    """
    # Validate and split every comma separated window up front; one bad
    # entry invalidates the whole specification.
    windows = []
    for spec in maintenance_windows.split(','):
        pieces = spec.split('-', 1)
        if len(pieces) != 2:
            logger.error(
                '{0} - Malformatted maintenance window'.format(table_name))
            return False
        windows.append(pieces)

    # Zero padded 'HHMM' strings order the same way the times do.
    current = datetime.datetime.utcnow().strftime('%H%M')
    return any(
        begin.replace(':', '') <= current <= finish.replace(':', '')
        for begin, finish in windows)
# Shared module-level DynamoDB connection, established once at import time
# and reused by all helpers in this module.
DYNAMODB_CONNECTION = __get_connection_dynamodb()
| |
# -*- coding: utf-8 -*-
"""
cpsdirector.user
================
ConPaaS director: users and authentication handling
:copyright: (C) 2013 by Contrail Consortium.
"""
from flask import Blueprint
from flask import jsonify, helpers, request, make_response, g
import os
import hashlib
import zipfile
import simplejson
from datetime import datetime
from functools import wraps
from StringIO import StringIO
from OpenSSL import crypto
from cpsdirector import db, x509cert
from cpsdirector.common import log, config_parser, build_response
from conpaas.core import https
user_page = Blueprint('user_page', __name__)
class cert_required(object):
    """Decorator that authenticates a request via its SSL client certificate.

    ``role`` selects the required certificate type. For ``role='manager'``
    the certificate must also carry a serviceLocator of a service owned by
    the certified user. On success ``g.cert`` and ``g.user`` (plus
    ``g.service`` for managers) are populated before the wrapped view runs.
    """

    def __init__(self, role):
        # Role required by the wrapped endpoint (eg. 'manager').
        self.role = role

    def __call__(self, fn):
        @wraps(fn)
        def decorated(*args, **kwargs):
            g.cert = {}

            if os.environ.get('DIRECTOR_TESTING'):
                # No SSL certificate check if we are testing. Trust what the
                # client is sending.
                g.cert['UID'] = request.values.get('uid')
                g.cert['role'] = request.values.get('role')
                g.cert['serviceLocator'] = request.values.get('sid')
            else:
                # Pull the relevant DN fields out of the client certificate
                # forwarded by the SSL frontend.
                cert = request.environ['SSL_CLIENT_CERT']
                for key in 'serviceLocator', 'UID', 'role':
                    g.cert[key] = https.x509.get_x509_dn_field(cert, key)

            try:
                uid = int(g.cert['UID'])
            except (AttributeError, ValueError, TypeError):
                error_msg = 'cert_required: client certificate does NOT provide UID'
                log(error_msg)
                # Return HTTP_UNAUTHORIZED
                return make_response(error_msg, 401)

            # Getting user data from DB
            g.user = User.query.get(uid)
            if not g.user:
                # authentication failed
                return build_response(simplejson.dumps(False))

            if self.role == 'manager':
                # manager cert required
                try:
                    service_locator = int(g.cert['serviceLocator'])
                except (AttributeError, ValueError):
                    error_msg = 'cert_required: client certificate does NOT provide serviceLocator'
                    log(error_msg)
                    # Return HTTP_UNAUTHORIZED
                    return make_response(error_msg, 401)

                # check if the service is actually owned by the user
                from cpsdirector.service import get_service
                g.service = get_service(uid, service_locator)
                if not g.service:
                    return build_response(simplejson.dumps(False))

                log('cert_required: valid certificate (user %s, service %s)' %
                    (uid, service_locator))

            return fn(*args, **kwargs)
        return decorated
class User(db.Model):
    """Database model for a ConPaaS director user account."""

    uid = db.Column(db.Integer, primary_key=True,
                    autoincrement=True)
    username = db.Column(db.String(80), unique=True, nullable=False)
    fname = db.Column(db.String(256))
    lname = db.Column(db.String(256))
    email = db.Column(db.String(256), unique=True)
    affiliation = db.Column(db.String(256))
    # NOTE(review): stored as an unsalted MD5 hexdigest (see get_user and
    # create_user) -- consider migrating to a salted KDF.
    password = db.Column(db.String(256))
    created = db.Column(db.DateTime)
    credit = db.Column(db.Integer)

    def __init__(self, **kwargs):
        # Default values
        self.credit = 0
        self.created = datetime.now()

        # Any column may be overridden through keyword arguments.
        for key, val in kwargs.items():
            setattr(self, key, val)

    def to_dict(self):
        """Return this user's columns as a plain dict.

        NOTE(review): the (hashed) password is included and therefore sent
        back to clients by the /login and /new_user views -- confirm this
        exposure is intended.
        """
        return {
            'uid': self.uid, 'username': self.username,
            'fname': self.fname, 'lname': self.lname,
            'email': self.email, 'affiliation': self.affiliation,
            'password': self.password, 'credit': self.credit,
            'created': self.created.isoformat(),
        }
def get_user(username, password):
    """Return a User object if the specified (username, password) combination
    is valid."""
    # Passwords are stored as hex-encoded MD5 digests, so hash the
    # candidate before comparing against the database.
    digest = hashlib.md5(password).hexdigest()
    return User.query.filter_by(username=username, password=digest).first()
from cpsdirector.application import Application
def create_user(username, fname, lname, email, affiliation, password, credit):
"""Create a new user with the given attributes. Return a new User object
in case of successful creation. None otherwise."""
new_user = User(username=username,
fname=fname,
lname=lname,
email=email,
affiliation=affiliation,
password=hashlib.md5(password).hexdigest(),
credit=credit)
app = Application(user=new_user)
db.session.add(new_user)
db.session.add(app)
try:
db.session.commit()
return new_user
except Exception, err:
db.session.rollback()
raise err
def login_required(fn):
    """Decorator authenticating the request through its 'username' and
    'password' form values; populates g.user before calling the view."""
    @wraps(fn)
    def decorated_view(*args, **kwargs):
        # Getting user data from DB
        g.user = get_user(request.values.get('username', ''),
                          request.values.get('password', ''))

        if not g.user:
            # authentication failed
            return build_response(simplejson.dumps(False))

        # user authenticated
        return fn(*args, **kwargs)
    return decorated_view
@user_page.route("/new_user", methods=['POST'])
def new_user():
    """POST /new_user

    Create a user from the POSTed form values. Every field in
    required_fields is mandatory; username and email must be unique.
    Returns the new user's dict on success, or an error dict otherwise.
    """
    values = {}
    required_fields = ( 'username', 'fname', 'lname', 'email',
                        'affiliation', 'password', 'credit' )

    log('New user "%s <%s>" creation attempt' % (
        request.values.get('username'), request.values.get('email')))

    # check for presence of mandatory fields
    for field in required_fields:
        values[field] = request.values.get(field)
        if not values[field]:
            log('Missing required field: %s' % field)
            return build_response(jsonify({
                'error': True, 'msg': '%s is a required field' % field }))

    # check if the provided username already exists
    if User.query.filter_by(username=values['username']).first():
        log('User %s already exists' % values['username'])
        return build_response(jsonify({
            'error': True,
            'msg': 'Username "%s" already taken' % values['username'] }))

    # check if the provided email already exists
    if User.query.filter_by(email=values['email']).first():
        log('Duplicate email: %s' % values['email'])
        return build_response(jsonify({
            'error': True,
            'msg': 'E-mail "%s" already registered' % values['email'] }))

    try:
        user = create_user(**values)
        # successful creation
        log('User %s created successfully' % user.username)
        return build_response(simplejson.dumps(user.to_dict()))
    except Exception, err:
        # something went wrong
        error_msg = 'Error upon user creation: %s -> %s' % (type(err), err)
        log(error_msg)
        return build_response(jsonify({ 'error': True, 'msg': error_msg }))
@user_page.route("/login", methods=['POST'])
@login_required
def login():
    """POST /login

    Credentials are verified by the login_required decorator; by the time
    we get here g.user is the authenticated account.
    """
    log('Successful login for user %s' % g.user.username)
    # return user data
    return build_response(simplejson.dumps(g.user.to_dict()))
@user_page.route("/getcerts", methods=['POST','GET'])
@login_required
def get_user_certs():
    """Generate fresh user certificates and return them as a zip archive."""
    # Creates new certificates for this user
    certs = x509cert.generate_certificate(
        cert_dir=config_parser.get('conpaas', 'CERT_DIR'),
        uid=str(g.user.uid),
        sid='0',
        role='user',
        email=g.user.email,
        cn=g.user.username,
        org='Contrail'
    )

    # In-memory zip file
    zipdata = StringIO()
    archive = zipfile.ZipFile(zipdata, mode='w')

    # Add key.pem, cert.pem and ca_cert.pem to the zip file
    for name, data in certs.items():
        archive.writestr(name + '.pem', data)

    archive.close()
    # Rewind so send_file streams from the beginning of the buffer.
    zipdata.seek(0)

    log('New certificates for user %s created' % g.user.username)

    # Send zip archive to the client
    return helpers.send_file(zipdata, mimetype="application/zip",
                             as_attachment=True,
                             attachment_filename='certs.zip')
@user_page.route("/ca/get_cert.php", methods=['POST'])
@cert_required(role='manager')
def get_manager_cert():
    """Sign and return an x509 certificate for the CSR uploaded by an
    already-authenticated manager (see cert_required)."""
    log('Certificate request from manager %s (user %s)' % (
        g.cert['serviceLocator'], g.cert['UID']))

    # The CSR is uploaded PEM-encoded in the 'csr' file field.
    csr = crypto.load_certificate_request(crypto.FILETYPE_PEM,
                                          request.files['csr'].read())

    return x509cert.create_x509_cert(
        config_parser.get('conpaas', 'CERT_DIR'), csr)
@user_page.route("/callback/decrementUserCredit.php", methods=['POST'])
@cert_required(role='manager')
def credit():
    """POST /callback/decrementUserCredit.php

    POSTed values must contain sid and decrement.

    Returns a dictionary with the 'error' attribute set to False if the user
    had enough credit, True otherwise.
    """
    service_id = int(request.values.get('sid', -1))
    decrement = int(request.values.get('decrement', 0))

    log('Decrement user credit: sid=%s, old_credit=%s, decrement=%s' % (
        service_id, g.service.user.credit, decrement))

    # Decrement user's credit
    g.service.user.credit -= decrement
    if g.service.user.credit > -1:
        # User has enough credit: persist the new balance.
        db.session.commit()
        log('New credit for user %s: %s' % (g.service.user.uid, g.service.user.credit))
        return jsonify({ 'error': False })

    # User does not have enough credit: undo the in-memory decrement.
    db.session.rollback()
    log('User %s does not have enough credit' % g.service.user.uid)
    return jsonify({ 'error': True })
| |
'''
Stateful, singleton, paradrop daemon command center.
See docstring for NexusBase class for information on settings.
SETTINGS QUICK REFERENCE:
# assuming the following import
from paradrop.base import nexus
nexus.core.info.version
nexus.core.info.pdid
'''
import os
import yaml
import json
import smokesignal
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from . import output, settings
from paradrop.lib.utils import pdosq
# Global access. Assign this wherever you instantiate the Nexus object:
# nexus.core = MyNexusSubclass()
core = None
class NexusBase(object):
    '''
    Resolving these values to their final forms:
        1 - module imported, initial values assigned (as written below)
        2 - class is instantiated, passed settings to replace values
        3 - instance chooses appropriate values based on current state
            (production or local)

    Each category has its own method for initialization here
    (see: resolveNetwork, resolvePaths)
    '''

    VERSION = 1    # nexus.core.info.version
    PDID = None    # nexus.core.info.pdid

    def __init__(self, stealStdio=True, printToConsole=True):
        '''
        The one big thing this function leaves out is reactor.start(). Call this externally
        *after* initializing a nexus object.
        '''
        self.session = None
        self.wamp_connected = False
        self.jwt_valid = False

        # Settings live behind an AttrWrapper; load them from the config
        # file, then autosave on any subsequent change.
        self.info = AttrWrapper()
        resolveInfo(self, settings.CONFIG_FILE)
        self.info.setOnChange(self.onInfoChange)

        # initialize output. If filepath is set, logs to file.
        # If stealStdio is set intercepts all stderr and stdout and interprets it internally
        # If printToConsole is set (defaults True) all final output is rendered to stdout
        output.out.startLogging(filePath=settings.LOG_DIR, stealStdio=stealStdio, printToConsole=printToConsole)

        # register onStop for the shutdown call
        reactor.addSystemEventTrigger('before', 'shutdown', self.onStop)

        # The reactor needs to be running before this call is fired, since we start the session
        # here. Assuming callLater doesn't fire until thats happened
        reactor.callLater(0, self.onStart)

    def onStart(self):
        # Report our identity (or UNPROVISIONED) as the daemon comes up.
        pdid = self.info.pdid if self.provisioned() else 'UNPROVISIONED'
        output.out.usage('%s coming up' % (pdid))

    def onStop(self):
        # Persist settings, then tear down signal handlers and logging.
        self.save()

        output.out.usage('%s going down' % (self.info.pdid))
        smokesignal.clear_all()
        output.out.endLogging()

    @inlineCallbacks
    def connect(self, sessionClass, debug=False):
        '''
        Takes the given session class and attempts to connect to the crossbar fabric.

        If an existing session is connected, it is cleanly closed.
        '''
        if (self.session is not None):
            yield self.session.leave()
            self.wamp_connected = False

        output.out.info('Connecting to wamp router at URI: %s' % str(self.info.wampRouter))

        # Setting self.session here only works for the first connection but
        # becomes stale if the connection fails and we reconnect.
        # In that case a new session object is automatically created.
        # For this reason, we also update this session reference in BaseSession.onJoin.
        self.session = yield sessionClass.start(self.info.wampRouter, self.info.pdid, debug=debug)

        returnValue(self.session)

    def onInfoChange(self, key, value):
        '''
        Called when an internal setting is changed. Trigger a save automatically.
        '''
        self.save()

    def save(self):
        ''' Ehh. Ideally this should happen asynchronously. '''
        saveDict = self.info.__dict__['contents']
        # Make sure the schema version is always written out.
        saveDict['version'] = self.info.version
        writeYaml(saveDict, settings.CONFIG_FILE)

    #########################################################
    # High Level Methods
    #########################################################

    def provisioned(self):
        '''
        Checks if this [whatever] appears to be provisioned or not
        '''
        return self.info.pdid is not None

    def provision(self, pdid, pdserver=settings.PDSERVER, wampRouter=settings.WAMP_ROUTER):
        # Assigning these triggers onInfoChange and therefore a save().
        self.info.pdid = pdid
        self.info.pdserver = pdserver
        self.info.wampRouter = wampRouter

    #########################################################
    # Keys
    #########################################################

    def saveKey(self, key, name):
        ''' Save the key with the given name. Overwrites by default '''
        path = settings.KEY_DIR + name

        with open(path, 'wb') as f:
            f.write(key)

    def getKey(self, name):
        ''' Returns the given key or None '''
        path = settings.KEY_DIR + name
        if os.path.isfile(path):
            with open(path, 'rb') as f:
                return f.read()

        return None

    #########################################################
    # Misc
    #########################################################

    def __repr__(self):
        ''' Dump everything '''
        dic = dict(info=self.info.contents)
        return json.dumps(dic, sort_keys=True, indent=4)
#########################################################
# Utils
#########################################################
class AttrWrapper(object):
    '''
    Attribute-style wrapper around a plain dict of settings.

    Values live in an internal dict called ``contents``. After ``_lock()``
    is called the wrapper becomes read-only: reads keep working, writes
    raise. An optional change callback registered through ``setOnChange``
    fires after every successful write.
    '''

    def __init__(self):
        # Go through __dict__ directly so these bookkeeping slots do not
        # hit our own __setattr__ and end up inside contents.
        self.__dict__['contents'] = {}
        self.__dict__['onChange'] = None
        self.__dict__['locked'] = False

    def _lock(self):
        ''' Freeze the wrapper: subsequent writes raise AttributeError. '''
        self.__dict__['locked'] = True

    def setOnChange(self, func):
        ''' Register func to be invoked as func(key, value) on writes. '''
        assert(callable(func))
        self.__dict__['onChange'] = func

    def __repr__(self):
        return str(self.contents)

    def __getattr__(self, name):
        return self.__dict__['contents'][name]

    def __setattr__(self, k, v):
        if self.__dict__['locked']:
            raise AttributeError('This attribute wrapper is locked. You cannot change its values.')

        self.contents[k] = v

        callback = self.__dict__['onChange']
        if callback is not None:
            callback(k, v)
def resolveInfo(nexus, path):
    '''
    Given a path to the config file, load its contents and assign it to the
    config file as appropriate.
    '''
    # Check to make sure we have a default settings file
    if not os.path.isfile(path):
        createDefaultInfo(path)

    contents = pdosq.read_yaml_file(path)

    # Sanity check contents of info and throw it out if bad
    if not validateInfo(contents):
        output.out.err('Saved configuration data invalid, destroying it.')
        os.remove(path)
        createDefaultInfo(path)
        contents = pdosq.read_yaml_file(path, default={})
        # Re-persist the freshly recreated defaults.
        writeYaml(contents, path)

    # Copy the four known settings onto the nexus info wrapper.
    nexus.info.pdid = contents['pdid']
    nexus.info.version = contents['version']
    nexus.info.pdserver = contents['pdserver']
    nexus.info.wampRouter = contents['wampRouter']
def createDefaultInfo(path):
    ''' Write a fresh, unprovisioned config file at the given path. '''
    writeYaml({
        'version': 1,
        'pdid': None,
        'pdserver': settings.PDSERVER,
        'wampRouter': settings.WAMP_ROUTER,
    }, path)
def validateInfo(contents):
    '''
    Error checking on the read YAML file. This is a temporary method.

    :param contents: the read-in yaml to check
    :type contents: dict
    :returns: True if valid, else False
    '''
    INFO_REQUIRES = ['version', 'pdid', 'pdserver', 'wampRouter']

    # Every required key must be present; values are not type-checked here.
    for k in INFO_REQUIRES:
        if k not in contents:
            output.out.err('Contents is missing: ' + str(k))
            return False

    return True
def writeYaml(contents, path):
    ''' Overwrites content with YAML representation at given path '''
    # Serialize first so a dump failure never truncates the target file.
    serialized = yaml.safe_dump(contents, default_flow_style=False)
    with open(path, 'w') as handle:
        handle.write(serialized)
| |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import traceback
from thrift.protocol.THeaderProtocol import THeaderProtocol
from thrift.server.TServer import TServer, TConnectionContext
from thrift.transport.THeaderTransport import THeaderTransport
from thrift.transport.TTransport import TMemoryBuffer
from thrift.server.CppServerWrapper import CppServerWrapper, CppContextData, \
SSLPolicy, SSLVerifyPeerEnum, SSLVersion, CallbackWrapper, CallTimestamps
from concurrent.futures import Future
from functools import partial
import time
# Default sampling rate for expensive sampling operations, such as histogram
# counters.
kDefaultSampleRate = 32
class TCppConnectionContext(TConnectionContext):
    """Connection context backed by the C++ server's CppContextData."""

    def __init__(self, context_data):
        self.context_data = context_data

    def getClientPrincipal(self):
        return self.context_data.getClientIdentity()

    def getClientPrincipalUser(self):
        """Return the user portion of a 'user@domain' principal, or None."""
        principal = self.getClientPrincipal()
        if not principal:
            return None
        # partition() reports whether an '@' separator was present.
        user, separator, _domain = principal.partition('@')
        return user if separator else None

    def getPeerName(self):
        return self.context_data.getPeerAddress()

    def getSockName(self):
        return self.context_data.getLocalAddress()
class _ProcessorAdapter(object):
    """Bridges the C++ server to a Python thrift processor.

    The C++ side hands over raw request bytes with the THeader frame
    already stripped; this adapter reconstitutes a framed message, runs
    the Python processor over it, and returns the response bytes through
    the supplied callback.
    """

    CONTEXT_DATA = CppContextData
    CALLBACK_WRAPPER = CallbackWrapper

    def __init__(self, processor):
        self.processor = processor
        self.observer = None
        # Only 1 out of every sampleRate calls records timing stats.
        self.sampleRate = kDefaultSampleRate
        self.sampleCount = 0

    def setObserver(self, observer):
        self.observer = observer

    def _shouldSample(self):
        # Rolling counter: returns True once per sampleRate calls.
        self.sampleCount = (self.sampleCount + 1) % self.sampleRate
        return self.sampleCount == 0

    # TODO mhorowitz: add read headers here, so they can be added to
    # the constructed header buffer. Also add endpoint addrs to the
    # context
    def call_processor(self, input, headers, client_type, protocol_type,
                       context_data, callback):
        try:
            # The input string has already had the header removed, but
            # the python processor will expect it to be there. In
            # order to reconstitute the message with headers, we use
            # the THeaderProtocol object to write into a memory
            # buffer, then pass that buffer to the python processor.

            should_sample = self._shouldSample()

            timestamps = CallTimestamps()
            timestamps.processBegin = 0
            timestamps.processEnd = 0
            if self.observer and should_sample:
                # Microsecond-resolution wall-clock timestamp.
                timestamps.processBegin = int(time.time() * 10**6)

            # Rebuild a THeader-framed request in memory.
            write_buf = TMemoryBuffer()
            trans = THeaderTransport(write_buf)
            # Poke the name-mangled private fields so the transport frames
            # the message exactly as the original client did.
            trans._THeaderTransport__client_type = client_type
            trans._THeaderTransport__write_headers = headers
            trans.set_protocol_id(protocol_type)
            trans.write(input)
            trans.flush()

            prot_buf = TMemoryBuffer(write_buf.getvalue())
            prot = THeaderProtocol(prot_buf, client_types=[client_type])

            ctx = TCppConnectionContext(context_data)

            ret = self.processor.process(prot, prot, ctx)

            done_callback = partial(_ProcessorAdapter.done,
                                    prot_buf=prot_buf,
                                    client_type=client_type,
                                    callback=callback)
            if self.observer:
                if should_sample:
                    timestamps.processEnd = int(time.time() * 10**6)

                # This only bumps counters if `processBegin != 0` and
                # `processEnd != 0` and these will only be non-zero if
                # we are sampling this request.
                self.observer.callCompleted(timestamps)

            # This future is created by and returned from the processor's
            # ThreadPoolExecutor, which keeps a reference to it. So it is
            # fine for this future to end its lifecycle here.
            if isinstance(ret, Future):
                ret.add_done_callback(lambda x, d=done_callback: d())
            else:
                done_callback()
        except:
            # Don't let exceptions escape back into C++
            traceback.print_exc()

    @staticmethod
    def done(prot_buf, client_type, callback):
        """Strip the THeader frame from the response and hand the payload
        bytes back to the C++ callback."""
        try:
            response = prot_buf.getvalue()
            if len(response) == 0:
                # Oneway call: nothing to send back.
                callback.call(response)
            else:
                # And on the way out, we need to strip off the header,
                # because the C++ code will expect to add it.
                read_buf = TMemoryBuffer(response)
                trans = THeaderTransport(read_buf, client_types=[client_type])
                trans.readFrame(len(response))
                callback.call(trans.cstringio_buf.read())
        except:
            # Don't let exceptions escape back into C++.
            traceback.print_exc()

    def oneway_methods(self):
        return self.processor.onewayMethods()
class TSSLConfig(object):
    """Validated holder for the server's SSL settings.

    The enum-typed fields (verify, ssl_policy, ssl_version) are guarded by
    property setters that reject values of the wrong type.
    """

    def __init__(self):
        self.cert_path = ''
        self.key_path = ''
        self.key_pw_path = ''
        self.client_ca_path = ''
        self.ecc_curve_name = ''
        self.verify = SSLVerifyPeerEnum.VERIFY
        self.ssl_policy = SSLPolicy.PERMITTED
        self.ticket_file_path = ''
        self.alpn_protocols = []
        self.session_context = None
        self.ssl_version = None

    @property
    def ssl_version(self):
        return self._ssl_version

    @ssl_version.setter
    def ssl_version(self, val):
        # None is allowed (no explicit version); anything else must be an
        # SSLVersion enum member.
        if val is not None and not isinstance(val, SSLVersion):
            raise ValueError("{} is an invalid version".format(val))
        self._ssl_version = val

    @property
    def ssl_policy(self):
        return self._ssl_policy

    @ssl_policy.setter
    def ssl_policy(self, val):
        if not isinstance(val, SSLPolicy):
            raise ValueError("{} is an invalid policy".format(val))
        self._ssl_policy = val

    @property
    def verify(self):
        return self._verify

    @verify.setter
    def verify(self, val):
        if not isinstance(val, SSLVerifyPeerEnum):
            raise ValueError("{} is an invalid value".format(val))
        self._verify = val
class TSSLCacheOptions(object):
    """Default tunables for the server-side SSL session cache."""

    def __init__(self):
        # 86400 seconds == 24 hours.
        self.ssl_cache_timeout_seconds = 86400
        self.max_ssl_cache_size = 20480
        self.ssl_cache_flush_size = 200
class TCppServer(CppServerWrapper, TServer):
    """Thrift server that delegates I/O to the C++ ThriftServer while a
    Python processor handles the requests."""

    def __init__(self, processor):
        CppServerWrapper.__init__(self)
        self.processor = self._getProcessor(processor)
        self.processorAdapter = _ProcessorAdapter(self.processor)
        self.setAdapter(self.processorAdapter)
        self._setup_done = False
        self.serverEventHandler = None

    def setObserver(self, observer):
        # The adapter samples per-call timings; the C++ wrapper keeps its
        # own reference for native instrumentation.
        self.processorAdapter.setObserver(observer)
        CppServerWrapper.setObserver(self, observer)

    def setServerEventHandler(self, handler):
        TServer.setServerEventHandler(self, handler)
        handler.CONTEXT_DATA = CppContextData
        handler.CPP_CONNECTION_CONTEXT = TCppConnectionContext
        self.setCppServerEventHandler(handler)

    def setSSLConfig(self, config):
        """Install SSL settings; *config* must be a TSSLConfig."""
        if not isinstance(config, TSSLConfig):
            raise ValueError("Config must be of type TSSLConfig")
        self.setCppSSLConfig(config)

    def setSSLCacheOptions(self, cache_options):
        """Install SSL session-cache settings; must be a TSSLCacheOptions."""
        if not isinstance(cache_options, TSSLCacheOptions):
            # Bug fix: the message previously read "Options might be of
            # type ...", which mis-stated the requirement.
            raise ValueError("Options must be of type TSSLCacheOptions")
        self.setCppSSLCacheOptions(cache_options)

    def setFastOpenOptions(self, enabled, tfo_max_queue):
        self.setCppFastOpenOptions(enabled, tfo_max_queue)

    def getTicketSeeds(self):
        return self.getCppTicketSeeds()

    def setup(self):
        """Bind the underlying C++ server. Idempotent."""
        if self._setup_done:
            return
        CppServerWrapper.setup(self)
        # Task expire isn't supported in Python
        CppServerWrapper.setTaskExpireTime(self, 0)
        if self.serverEventHandler is not None:
            self.serverEventHandler.preServe(self.getAddress())
        self._setup_done = True

    def loop(self):
        """Run the serve loop; setup() must have been called first."""
        if not self._setup_done:
            raise RuntimeError(
                "setup() must be called before loop()")
        CppServerWrapper.loop(self)

    def serve(self):
        """Convenience entry point: setup, loop, and always clean up."""
        self.setup()
        try:
            self.loop()
        finally:
            self.cleanUp()
| |
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for monitored_session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import glob
import os
import threading
import time
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import debug_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import coordinator
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
class ScaffoldTest(test.TestCase):
  """Scaffold tests."""

  def test_nothing_created_before_finalize(self):
    # All scaffold pieces stay None until finalize() builds them.
    with ops.Graph().as_default():
      scaffold = monitored_session.Scaffold()
      self.assertEqual(None, scaffold.init_op)
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertEqual(None, scaffold.ready_op)
      self.assertEqual(None, scaffold.ready_for_local_init_op)
      self.assertEqual(None, scaffold.local_init_op)
      self.assertEqual(None, scaffold.saver)

  def test_defaults_empty_graph(self):
    # finalize() on a graph holding variables creates default init/ready
    # ops and a saver.
    with ops.Graph().as_default():
      scaffold = monitored_session.Scaffold()
      variables.Variable(1, name='my_var')
      variables.Variable(
          2, name='my_local_var', collections=[ops.GraphKeys.LOCAL_VARIABLES])
      scaffold.finalize()
      self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
      self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))
      with self.test_session() as sess:
        # Both variables report as uninitialized before the init ops run.
        self.assertItemsEqual([b'my_var', b'my_local_var'],
                              sess.run(scaffold.ready_op))
        self.assertItemsEqual([b'my_var'],
                              sess.run(scaffold.ready_for_local_init_op))
        sess.run(scaffold.init_op)
        self.assertEqual(0, len(sess.run(scaffold.ready_for_local_init_op)))
        sess.run(scaffold.local_init_op)
        self.assertEqual(0, len(sess.run(scaffold.ready_op)))

  def test_defaults_no_variables(self):
    # Defaults are still created for a graph without variables.
    with ops.Graph().as_default():
      scaffold = monitored_session.Scaffold()
      constant_op.constant(1, name='my_const')
      scaffold.finalize()
      self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
      self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))

  def test_caches_values(self):
    # Two scaffolds finalized on the same graph share the cached items.
    with ops.Graph().as_default():
      variables.Variable([1])
      scaffold1 = monitored_session.Scaffold()
      scaffold1.finalize()
      scaffold2 = monitored_session.Scaffold()
      scaffold2.finalize()
      self.assertEqual(scaffold1.init_op, scaffold2.init_op)
      self.assertEqual(scaffold1.ready_op, scaffold2.ready_op)
      self.assertEqual(scaffold1.ready_for_local_init_op,
                       scaffold2.ready_for_local_init_op)
      self.assertEqual(scaffold1.local_init_op, scaffold2.local_init_op)
      self.assertEqual(scaffold1.saver, scaffold2.saver)

  def test_raise_error_if_more_than_one_cached_item(self):
    # Ambiguous cached collections (two savers) must be rejected.
    with ops.Graph().as_default():
      variables.Variable([1])
      ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
      ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
      with self.assertRaisesRegexp(RuntimeError, 'More than one item'):
        monitored_session.Scaffold().finalize()

  def test_uses_passed_values(self):
    # Explicit constructor arguments win over generated defaults.
    with ops.Graph().as_default():
      variables.Variable([1])
      saver = saver_lib.Saver()
      scaffold = monitored_session.Scaffold(
          init_op=2,
          init_feed_dict=3,
          init_fn=lambda scaffold, sess: 4,
          ready_op=5,
          ready_for_local_init_op=6,
          local_init_op=7,
          saver=saver)
      scaffold.finalize()
      self.assertEqual(2, scaffold.init_op)
      self.assertEqual(3, scaffold.init_feed_dict)
      self.assertTrue(callable(scaffold.init_fn))
      self.assertEqual(5, scaffold.ready_op)
      self.assertEqual(6, scaffold.ready_for_local_init_op)
      self.assertEqual(7, scaffold.local_init_op)
      self.assertEqual(saver, scaffold.saver)

  def test_graph_is_finalized(self):
    # finalize() freezes the graph; further op creation must fail.
    with ops.Graph().as_default():
      variables.Variable([1])
      monitored_session.Scaffold().finalize()
      with self.assertRaisesRegexp(RuntimeError,
                                   'Graph is finalized and cannot be modified'):
        constant_op.constant([0])

  def test_new_scaffold_from_default_scaffold(self):
    # copy_from_scaffold with a default source: explicit args still win.
    scaffold1 = monitored_session.Scaffold()
    with ops.Graph().as_default():
      variables.Variable([1])
      saver = saver_lib.Saver()
      scaffold2 = monitored_session.Scaffold(
          init_op=2,
          init_feed_dict=3,
          init_fn=lambda scaffold, sess: 4,
          ready_op=5,
          ready_for_local_init_op=6,
          local_init_op=7,
          saver=saver,
          copy_from_scaffold=scaffold1)

      scaffold2.finalize()
      self.assertEqual(2, scaffold2.init_op)
      self.assertEqual(3, scaffold2.init_feed_dict)
      self.assertTrue(callable(scaffold2.init_fn))
      self.assertEqual(5, scaffold2.ready_op)
      self.assertEqual(6, scaffold2.ready_for_local_init_op)
      self.assertEqual(7, scaffold2.local_init_op)
      self.assertEqual(saver, scaffold2.saver)

  def test_new_scaffold_from_existing_scaffold(self):
    # Arguments passed to the new scaffold take precedence over the
    # values copied from an existing one.
    with ops.Graph().as_default():
      variables.Variable([1])
      saver = saver_lib.Saver()
      scaffold1 = monitored_session.Scaffold(
          init_op=2,
          init_feed_dict=3,
          init_fn=lambda scaffold, sess: 4,
          ready_op=5,
          ready_for_local_init_op=6,
          local_init_op=7,
          saver=saver)

      scaffold2 = monitored_session.Scaffold(
          init_op=4,
          init_feed_dict=6,
          init_fn=lambda scaffold, sess: 8,
          ready_op=10,
          ready_for_local_init_op=12,
          local_init_op=14,
          saver=saver,
          copy_from_scaffold=scaffold1)

      scaffold2.finalize()
      self.assertEqual(4, scaffold2.init_op)
      self.assertEqual(6, scaffold2.init_feed_dict)
      self.assertTrue(callable(scaffold2.init_fn))
      self.assertEqual(10, scaffold2.ready_op)
      self.assertEqual(12, scaffold2.ready_for_local_init_op)
      self.assertEqual(14, scaffold2.local_init_op)
      self.assertEqual(saver, scaffold2.saver)

  def test_copy_from_scaffold_is_scaffold(self):
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(
          TypeError, 'copy_from_scaffold is not a Scaffold instance'):
        monitored_session.Scaffold(copy_from_scaffold=1)
def _test_dir(temp_dir, test_name):
"""Create an empty dir to use for tests.
Args:
temp_dir: Tmp directory path.
test_name: Name of the test.
Returns:
Absolute path to the test directory.
"""
test_dir = os.path.join(temp_dir, test_name)
if os.path.isdir(test_dir):
for f in glob.glob('%s/*' % test_dir):
os.remove(f)
else:
os.makedirs(test_dir)
return test_dir
class FakeHook(session_run_hook.SessionRunHook):
  """Hook double that counts callbacks and captures run context/values."""

  def __init__(self):
    self.should_stop = False
    self.request = None
    self.call_counter = collections.Counter()
    self.last_run_context = None
    self.last_run_values = None

  def begin(self):
    self.call_counter.update(['begin'])

  def after_create_session(self, session, coord):  # pylint: disable=unused-argument
    self.call_counter.update(['after_create_session'])

  def before_run(self, run_context):
    self.call_counter.update(['before_run'])
    self.last_run_context = run_context
    return self.request

  def after_run(self, run_context, run_values):
    self.call_counter.update(['after_run'])
    self.last_run_values = run_values
    if self.should_stop:
      run_context.request_stop()

  def end(self, session):
    self.call_counter.update(['end'])
class MonitoredTrainingSessionTest(test.TestCase):
  """Tests MonitoredTrainingSession."""

  def test_saving_restoring_checkpoint(self):
    """A chief session checkpoints; a restart recovers the global step."""
    logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
      # A restart will find the checkpoint and recover automatically.
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(2, session.run(gstep))

  def test_summaries_steps(self):
    """Step-based summary saving writes user and global_step/sec summaries."""
    logdir = _test_dir(self.get_temp_dir(), 'test_summaries_steps')
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      summary.scalar('my_summary_tag', new_gstep * 2)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          save_summaries_steps=100,
          log_step_count_steps=10) as session:
        for _ in range(101):
          session.run(new_gstep)
      summaries = util_test.latest_summaries(logdir)
      tags = [s.summary.value[0].tag for s in summaries]
      self.assertIn('my_summary_tag', tags)
      self.assertIn('global_step/sec', tags)

  def test_summaries_secs(self):
    """Time-based summary saving writes user and global_step/sec summaries."""
    logdir = _test_dir(self.get_temp_dir(), 'test_summaries_secs')
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      summary.scalar('my_summary_tag', new_gstep * 2)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          save_summaries_steps=None,
          save_summaries_secs=0.1,
          log_step_count_steps=10) as session:
        session.run(new_gstep)
        # Sleep past the save interval so the timer-based saver fires.
        time.sleep(0.2)
        for _ in range(101):
          session.run(new_gstep)
      summaries = util_test.latest_summaries(logdir)
      tags = [s.summary.value[0].tag for s in summaries]
      self.assertIn('my_summary_tag', tags)
      self.assertIn('global_step/sec', tags)

  def test_custom_saving(self):
    """save_checkpoint_secs=0 disables saving; chief-only hooks still run."""
    # Use a directory named after this test; the original reused
    # 'test_saving_restoring_checkpoint', clobbering that test's directory.
    logdir = _test_dir(self.get_temp_dir(), 'test_custom_saving')
    fake_hook = FakeHook()
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          chief_only_hooks=[fake_hook],
          save_checkpoint_secs=0) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
      # Check whether custom hook called or not
      self.assertEqual(1, fake_hook.call_counter['begin'])
      # A restart will not find the checkpoint, since we didn't save.
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(0, session.run(gstep))
class StopAtNSession(monitored_session._WrappedSession):
  """A wrapped session whose N-th `_check_stop` call reports "stop"."""

  def __init__(self, sess, n):
    super(StopAtNSession, self).__init__(sess)
    self._count = n

  def _check_stop(self):
    # Let `n` calls through, then answer True on every subsequent call.
    if self._count:
      self._count -= 1
      return False
    return True
class WrappedSessionTest(test.TestCase):
  """_WrappedSession tests."""

  def test_properties(self):
    """The wrapper exposes the underlying session's graph and sess_str."""
    with self.test_session() as sess:
      constant_op.constant(0.0)
      wrapped_sess = monitored_session._WrappedSession(sess)
      # `assertEquals` is a deprecated unittest alias; use `assertEqual`.
      self.assertEqual(sess.graph, wrapped_sess.graph)
      self.assertEqual(sess.sess_str, wrapped_sess.sess_str)

  def test_should_stop_on_close(self):
    """Closing the wrapper flips should_stop."""
    with self.test_session() as sess:
      wrapped_sess = monitored_session._WrappedSession(sess)
      self.assertFalse(wrapped_sess.should_stop())
      wrapped_sess.close()
      self.assertTrue(wrapped_sess.should_stop())

  def test_should_stop_uses_check_stop(self):
    """should_stop consults the subclass `_check_stop` hook."""
    with self.test_session() as sess:
      wrapped_sess = StopAtNSession(sess, 3)
      self.assertFalse(wrapped_sess.should_stop())
      self.assertFalse(wrapped_sess.should_stop())
      self.assertFalse(wrapped_sess.should_stop())
      self.assertTrue(wrapped_sess.should_stop())

  def test_should_stop_delegates_to_wrapped_session(self):
    """A wrapper around a wrapper defers to the inner session's stop check."""
    with self.test_session() as sess:
      wrapped_sess0 = StopAtNSession(sess, 4)
      wrapped_sess1 = monitored_session._WrappedSession(wrapped_sess0)
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertTrue(wrapped_sess1.should_stop())

  def test_close_twice(self):
    """Closing an already-closed wrapper is a harmless no-op."""
    with self.test_session() as sess:
      wrapped_sess = monitored_session._WrappedSession(sess)
      wrapped_sess.close()
      self.assertTrue(wrapped_sess.should_stop())
      wrapped_sess.close()
      self.assertTrue(wrapped_sess.should_stop())

  def test_run(self):
    """`run` delegates to the wrapped session."""
    with self.test_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      self.assertEqual(42, sess.run(v, feed_dict={c: 42}))
      wrapped_sess = monitored_session._WrappedSession(sess)
      self.assertEqual(51, wrapped_sess.run(v, feed_dict={c: 51}))
def busy_wait_for_coord_stop(coord):
  """Polls (with a 1 ms sleep) until `coord` requests a stop."""
  while True:
    if coord.should_stop():
      break
    time.sleep(0.001)
class CoordinatedSessionTest(test.TestCase):
  """_CoordinatedSession tests."""

  def test_properties(self):
    """The coordinated wrapper exposes the session's graph and sess_str."""
    with self.test_session() as sess:
      constant_op.constant(0.0)
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      # `assertEquals` is a deprecated unittest alias; use `assertEqual`.
      self.assertEqual(sess.graph, coord_sess.graph)
      self.assertEqual(sess.sess_str, coord_sess.sess_str)

  def test_run(self):
    """`run` delegates to the wrapped session."""
    with self.test_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertEqual(42, coord_sess.run(v, feed_dict={c: 42}))

  def test_should_stop_on_close(self):
    """Closing the session flips should_stop."""
    with self.test_session() as sess:
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      coord_sess.close()
      self.assertTrue(coord_sess.should_stop())

  def test_should_stop_on_coord_stop(self):
    """A coordinator stop request flips should_stop."""
    with self.test_session() as sess:
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      coord.request_stop()
      self.assertTrue(coord_sess.should_stop())

  def test_dont_request_stop_on_exception_in_main_thread(self):
    """An exception in the main thread must not stop the coordinator."""
    with self.test_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      self.assertEqual(0, coord_sess.run(c))
      self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
      with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
        coord_sess.run([None], feed_dict={c: 2})
      # The exception above surfaced to the caller; the coordinator and the
      # session keep running.
      self.assertFalse(coord.should_stop())
      self.assertFalse(coord_sess.should_stop())

  def test_stop_threads_on_close_after_exception(self):
    """Closing after an exception stops all registered threads."""
    with self.test_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      coord = coordinator.Coordinator()
      threads = [
          threading.Thread(
              target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
      ]
      for t in threads:
        coord.register_thread(t)
        t.start()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      for t in threads:
        self.assertTrue(t.is_alive())
      self.assertEqual(0, coord_sess.run(c))
      for t in threads:
        self.assertTrue(t.is_alive())
      self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
      for t in threads:
        self.assertTrue(t.is_alive())
      with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
        coord_sess.run([None], feed_dict={c: 2})
      coord_sess.close()
      for t in threads:
        self.assertFalse(t.is_alive())
      self.assertTrue(coord.should_stop())
      self.assertTrue(coord_sess.should_stop())

  def test_stop_threads_on_close(self):
    """Closing the session stops all registered threads."""
    with self.test_session() as sess:
      coord = coordinator.Coordinator()
      threads = [
          threading.Thread(
              target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
      ]
      for t in threads:
        coord.register_thread(t)
        t.start()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      coord_sess.close()
      for t in threads:
        self.assertFalse(t.is_alive())
      self.assertTrue(coord.should_stop())
      self.assertTrue(coord_sess.should_stop())
class AbortAtNSession(object):
  """A mock session that aborts at the N-th run call."""

  def __init__(self, sess, n):
    self._sess = sess
    self._count = n

  def close(self):
    pass

  def run(self, *args, **kwargs):
    """Delegates to the wrapped session until the abort budget is exhausted."""
    if self._count == 0:
      # Use the (node_def, op, message) argument order, matching every other
      # AbortedError construction in this file; the original passed the
      # message string in the `node_def` position.
      raise errors_impl.AbortedError(None, None, 'Aborted at N')
    self._count -= 1
    return self._sess.run(*args, **kwargs)
class StopCoordinatorWithException(session_run_hook.SessionRunHook):
  """With this hook Coordinator throws an exception after N-runs."""

  def __init__(self, calls_before_stopping, exception_to_raise=None):
    self._started_the_side_thread_already = False
    self._lock = threading.Lock()
    self._stored_exception_event = threading.Event()
    self._calls_before_stopping = calls_before_stopping
    self._exception_to_raise = (exception_to_raise or errors_impl.AbortedError(
        None, None, 'Aborted at N'))

  def _maybe_stop_with_exception(self, coord):
    """Side-thread body: polls until the call budget hits zero, then stops
    `coord` with the configured exception and signals the main thread."""
    while True:
      with self._lock:
        if self._calls_before_stopping == 0:
          try:
            raise self._exception_to_raise
          except Exception as e:  # pylint: disable=broad-except
            coord.request_stop(e)
          self._stored_exception_event.set()
          break
      # Yield the CPU between polls; the original loop busy-waited at 100%.
      time.sleep(0.001)

  def after_create_session(self, session, coord):
    if self._started_the_side_thread_already:
      return
    separate_thread = threading.Thread(
        target=self._maybe_stop_with_exception, args=(coord,))
    coord.register_thread(separate_thread)
    separate_thread.start()
    self._started_the_side_thread_already = True
    # Coordinator will take care of joining `separate_thread`.

  def after_run(self, run_context, run_values):
    # Decrement the budget; when it reaches zero, block until the side
    # thread has actually stored the exception in the coordinator.
    stopping_now = False
    with self._lock:
      self._calls_before_stopping -= 1
      if self._calls_before_stopping == 0:
        stopping_now = True
    if stopping_now:
      self._stored_exception_event.wait()
class FailTrainingAfterCoordinatorStopped(StopCoordinatorWithException):
  """With this hook training encounters an exception after N-runs."""

  def __init__(self, calls_before_stopping):
    super(FailTrainingAfterCoordinatorStopped, self).__init__(
        calls_before_stopping)
    self._coord = None

  def after_create_session(self, session, coord):
    # Remember the coordinator so `after_run` can surface its exception.
    self._coord = coord
    return super(FailTrainingAfterCoordinatorStopped,
                 self).after_create_session(session, coord)

  def after_run(self, run_context, run_values):
    super(FailTrainingAfterCoordinatorStopped, self).after_run(
        run_context, run_values)
    try:
      # After a `run`, an exception could have been stored inside the
      # coordinator.
      self._coord.raise_requested_exception()
    except errors_impl.AbortedError:
      # In real world, the main thread may or may not know about the exception
      # that stopped the coordinator. Because the coordinator has stopped, the
      # main thread could have gotten stuck as well (for example, the
      # coordinator was supposed to execute `FIFOQueue.enqueue` while the main
      # thread is executing a blocking `FIFOQueue.dequeue`). After it got stuck,
      # the session is going to get garbage collected after some time with:
      raise errors_impl.CancelledError(None, None,
                                       'Session got garbage-collected.')
class CountingSessionCreator(object):
  """A creator that counts the number of created sessions."""

  def __init__(self, session):
    self._create_session_calls = 0
    self._initial_session = session
    # Each test case reuses a single session, so it cannot be re-created;
    # neutralize `close` to keep the shared session usable.
    self._initial_session.close = lambda *args: None

  @property
  def number_of_sessions_created(self):
    """Number of times `create_session` has been invoked."""
    return self._create_session_calls

  def create_session(self):
    self._create_session_calls += 1
    return self._initial_session
class RecoverableSessionTest(test.TestCase):
  """_RecoverableSession tests."""

  class _SessionReturner(object):
    """Trivial session creator that always returns the same session."""

    def __init__(self, sess):
      self._sess = sess

    def create_session(self):
      return self._sess

  def test_properties(self):
    """The recoverable wrapper exposes the session's graph and sess_str."""
    with self.test_session() as sess:
      constant_op.constant(0.0)
      recoverable_sess = monitored_session._RecoverableSession(
          self._SessionReturner(sess))
      # `assertEquals` is a deprecated unittest alias; use `assertEqual`.
      self.assertEqual(sess.graph, recoverable_sess.graph)
      self.assertEqual(sess.sess_str, recoverable_sess.sess_str)

  def test_run(self):
    """`run` delegates to the created session."""
    with self.test_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      recoverable_sess = monitored_session._RecoverableSession(
          self._SessionReturner(sess))
      self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))

  def test_recovery(self):
    """AbortedError during `run` triggers transparent session re-creation."""
    with self.test_session() as sess:

      class StackSessionCreator(object):
        """Pops pre-built aborting sessions, one per (re)creation."""

        def __init__(self, sess):
          self.sessions_to_use = [
              AbortAtNSession(sess, x + 1) for x in range(3)
          ]

        def create_session(self):
          return self.sessions_to_use.pop(0)

      c = constant_op.constant(0)
      v = array_ops.identity(c)
      session_creator = StackSessionCreator(sess)
      # List of 3 sessions to use for recovery.  The first one aborts
      # after 1 run() call, the second after 2 run calls, the third
      # after 3 run calls.
      self.assertEqual(3, len(session_creator.sessions_to_use))
      # Make the recoverable session uses these 3 sessions in sequence by
      # passing a factory that pops from the session_to_use list.
      recoverable_sess = monitored_session._RecoverableSession(session_creator)
      self.assertEqual(
          2, len(session_creator.sessions_to_use))  # One session popped.
      # Using first session.
      self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
      self.assertEqual(
          2, len(session_creator.sessions_to_use))  # Still 2 sessions available
      # This will fail and recover by picking up the second session.
      self.assertEqual(42, recoverable_sess.run(v, feed_dict={c: 42}))
      self.assertEqual(
          1, len(session_creator.sessions_to_use))  # Still 1 session available
      self.assertEqual(33, recoverable_sess.run(v, feed_dict={c: 33}))
      self.assertEqual(
          1, len(session_creator.sessions_to_use))  # Still 1 session available
      # This will fail and recover by picking up the last session.
      self.assertEqual(24, recoverable_sess.run(v, feed_dict={c: 24}))
      self.assertEqual(
          0, len(session_creator.sessions_to_use))  # All sessions used.
      self.assertEqual(11, recoverable_sess.run(v, feed_dict={c: 11}))
      self.assertEqual(0, recoverable_sess.run(v, feed_dict={c: 0}))
      # This will fail and throw a real error as the pop() will fail.
      with self.assertRaisesRegexp(IndexError, 'pop from empty list'):
        recoverable_sess.run(v, feed_dict={c: -12})

  def test_recovery_from_coordinator_exception(self):
    """An AbortedError stored in the coordinator recreates the session."""
    with self.test_session() as test_session:
      session_creator = CountingSessionCreator(test_session)
      session = monitored_session.MonitoredSession(
          session_creator,
          [StopCoordinatorWithException(calls_before_stopping=2)])

      self.assertEqual(1, session_creator.number_of_sessions_created)
      self.assertFalse(session.should_stop())

      c = constant_op.constant(0)
      v = array_ops.identity(c)

      # The coordinator will not abort during this call, since it's the call
      # number 0.
      self.assertEqual(51, session.run(v, feed_dict={c: 51}))
      self.assertFalse(session.should_stop())
      # The coordinator will abort during the next call, since it's the call
      # number 1.
      self.assertEqual(42, session.run(v, feed_dict={c: 42}))
      # Even though the coordinator was asked to stop, the underlying session is
      # recreated and is to be continued.
      self.assertFalse(session.should_stop())
      self.assertEqual(2, session_creator.number_of_sessions_created)

  def test_recovery_from_non_preemption_in_coordinator(self):
    """A non-preemption error in the coordinator stops training for good."""
    with self.test_session() as test_session:
      session_creator = CountingSessionCreator(test_session)
      hook = StopCoordinatorWithException(
          calls_before_stopping=2,
          exception_to_raise=errors_impl.UnknownError(
              None, None, 'Some fatal exception inside the coordinator.'))
      session = monitored_session.MonitoredSession(session_creator, [hook])

      self.assertEqual(1, session_creator.number_of_sessions_created)
      self.assertFalse(session.should_stop())

      c = constant_op.constant(0)
      v = array_ops.identity(c)

      # The coordinator will not abort during this call, since it's the call
      # number 0.
      self.assertEqual(51, session.run(v, feed_dict={c: 51}))
      self.assertFalse(session.should_stop())
      # The coordinator will abort during the next call, since it's the call
      # number 1.
      self.assertEqual(42, session.run(v, feed_dict={c: 42}))
      # The coordinator was asked to stop due to non-redeemable error. Training
      # should stop and the session should not be recreated.
      self.assertTrue(session.should_stop())
      self.assertEqual(1, session_creator.number_of_sessions_created)
      with self.assertRaises(errors_impl.UnknownError):
        session.close()

  def test_recovery_from_session_getting_stuck(self):
    """A CancelledError raised after a coordinator stop recreates the session."""
    with self.test_session() as test_session:
      session_creator = CountingSessionCreator(test_session)
      session = monitored_session.MonitoredSession(
          session_creator,
          [FailTrainingAfterCoordinatorStopped(calls_before_stopping=2)])

      self.assertEqual(1, session_creator.number_of_sessions_created)
      self.assertFalse(session.should_stop())

      c = constant_op.constant(0)
      v = array_ops.identity(c)

      # Training will not fail, since it's the call number 0.
      self.assertEqual(51, session.run(v, feed_dict={c: 51}))
      self.assertFalse(session.should_stop())
      # Training will fail during the next call, since it's the call
      # number 1.
      self.assertEqual(42, session.run(v, feed_dict={c: 42}))
      # Even though the coordinator stopped which and training failed, the
      # underlying session is recreated and training is to be continued.
      self.assertFalse(session.should_stop())
      self.assertEqual(2, session_creator.number_of_sessions_created)
class FakeSession(monitored_session._WrappedSession):
  """Wrapped session that captures the kwargs passed to `run`.

  `args_called` holds a copy of the keyword arguments from the most recent
  `run` call so tests can assert they were forwarded correctly.
  """

  def __init__(self, sess):
    monitored_session._WrappedSession.__init__(self, sess)
    self.args_called = {}

  def run(self, fetches, **kwargs):
    self.args_called = dict(kwargs)
    # Call run only with fetches since we directly pass other arguments.
    return monitored_session._WrappedSession.run(self, fetches)
class HookedSessionTest(test.TestCase):
  """Tests of _HookedSession."""

  def testRunPassesAllArguments(self):
    """feed_dict/options/run_metadata are forwarded to the wrapped session."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_run = FakeSession(sess)
      mon_sess = monitored_session._HookedSession(sess=mock_run, hooks=[])
      a_tensor = constant_op.constant([0], name='a_tensor')
      sess.run(variables.global_variables_initializer())
      output = mon_sess.run(fetches=a_tensor,
                            feed_dict='a_feed',
                            options='an_option',
                            run_metadata='a_metadata')
      self.assertEqual(output, [0])
      self.assertEqual(mock_run.args_called, {
          'feed_dict': 'a_feed',
          'options': 'an_option',
          'run_metadata': 'a_metadata'
      })

  def testCallsHooksBeginEnd(self):
    """before_run/after_run fire once per run; begin/end are not called."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      sess.run(variables.global_variables_initializer())
      mon_sess.run(a_tensor)

      for hook in [mock_hook, mock_hook2]:
        self.assertEqual(
            hook.last_run_values,
            session_run_hook.SessionRunValues(
                results=None,
                options=config_pb2.RunOptions(),
                run_metadata=config_pb2.RunMetadata()))
        self.assertEqual(hook.last_run_context.original_args,
                         session_run_hook.SessionRunArgs(a_tensor))
        self.assertEqual(hook.last_run_context.session, sess)
        self.assertEqual(hook.call_counter['begin'], 0)
        self.assertEqual(hook.call_counter['after_create_session'], 0)
        self.assertEqual(hook.call_counter['before_run'], 1)
        self.assertEqual(hook.call_counter['after_run'], 1)

  def testShouldStop(self):
    """A hook requesting a stop flips should_stop after the run."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      constant_op.constant([0], name='a_tensor')
      sess.run(variables.global_variables_initializer())

      mon_sess.run(fetches='a_tensor')
      self.assertFalse(mon_sess.should_stop())

      mock_hook.should_stop = True
      mon_sess.run(fetches='a_tensor')
      self.assertTrue(mon_sess.should_stop())

  def testFetchesHookRequests(self):
    """Extra fetches requested by hooks are run and delivered to the hooks."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      another_tensor = constant_op.constant([5], name='another_tensor')
      third_tensor = constant_op.constant([10], name='third_tensor')
      mock_hook.request = session_run_hook.SessionRunArgs([another_tensor])
      mock_hook2.request = session_run_hook.SessionRunArgs([third_tensor])
      sess.run(variables.global_variables_initializer())

      output = mon_sess.run(fetches=a_tensor)
      self.assertEqual(output, [0])
      self.assertEqual(mock_hook.last_run_values.results, [5])
      self.assertEqual(mock_hook2.last_run_values.results, [10])

  def testOnlyHooksHaveFeeds(self):
    """Feeds supplied only by hooks are merged into the run."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      b_tensor = constant_op.constant([0], name='b_tensor')
      add_tensor = a_tensor + b_tensor
      mock_hook.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [5]})
      mock_hook2.request = session_run_hook.SessionRunArgs(
          None, feed_dict={b_tensor: [10]})
      sess.run(variables.global_variables_initializer())

      self.assertEqual(mon_sess.run(fetches=add_tensor), [15])

  def testBothHooksAndUserHaveFeeds(self):
    """Hook feeds and user feeds on distinct tensors are merged."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      b_tensor = constant_op.constant([0], name='b_tensor')
      c_tensor = constant_op.constant([0], name='c_tensor')
      add_tensor = a_tensor + b_tensor + c_tensor
      mock_hook.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [5]})
      mock_hook2.request = session_run_hook.SessionRunArgs(
          None, feed_dict={b_tensor: [10]})
      sess.run(variables.global_variables_initializer())

      feed_dict = {c_tensor: [20]}
      self.assertEqual(
          mon_sess.run(fetches=add_tensor, feed_dict=feed_dict), [35])
      # User feed_dict should not be changed
      self.assertEqual(len(feed_dict), 1)

  def testHooksFeedConflicts(self):
    """Two hooks feeding the same tensor raise RuntimeError."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      b_tensor = constant_op.constant([0], name='b_tensor')
      add_tensor = a_tensor + b_tensor
      mock_hook.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [5]})
      mock_hook2.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [10]})
      sess.run(variables.global_variables_initializer())

      with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
        mon_sess.run(fetches=add_tensor)

  def testHooksAndUserFeedConflicts(self):
    """A hook and the user feeding the same tensor raise RuntimeError."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      b_tensor = constant_op.constant([0], name='b_tensor')
      add_tensor = a_tensor + b_tensor
      mock_hook.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [5]})
      mock_hook2.request = session_run_hook.SessionRunArgs(
          None, feed_dict={b_tensor: [10]})
      sess.run(variables.global_variables_initializer())

      with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
        mon_sess.run(fetches=add_tensor, feed_dict={b_tensor: [10]})
class RaiseOnceAtCountN(session_run_hook.SessionRunHook):
  """Hook that raises an Exception at step N."""

  def __init__(self, n, ex):
    self.n = n
    self.ex = ex
    self.raised = False

  def before_run(self, run_context):
    # Count down and raise exactly once, on the N-th call.
    self.n -= 1
    if self.n == 0 and not self.raised:
      self.raised = True
      raise self.ex
    return None
class RunOptionsMetadataHook(session_run_hook.SessionRunHook):
  """A hook that observes & optionally modifies RunOptions and RunMetadata."""

  def __init__(self, trace_level, timeout_in_ms, output_partition_graphs,
               debug_tensor_watch):
    self._trace_level = trace_level
    self._timeout_in_ms = timeout_in_ms
    self._output_partition_graphs = output_partition_graphs
    self._debug_tensor_watch = debug_tensor_watch
    # Populated by `after_run`; tests inspect these lists directly.
    self.run_options_list = []
    self.run_metadata_list = []

  def before_run(self, run_context):
    # Request that every run use these options, including the debug watch.
    options = config_pb2.RunOptions(
        trace_level=self._trace_level,
        timeout_in_ms=self._timeout_in_ms,
        output_partition_graphs=self._output_partition_graphs)
    options.debug_options.debug_tensor_watch_opts.extend(
        [self._debug_tensor_watch])
    return session_run_hook.SessionRunArgs(None, None, options=options)

  def after_run(self, run_context, run_values):
    # Record what actually ran so tests can assert on options/metadata.
    self.run_options_list.append(run_values.options)
    self.run_metadata_list.append(run_values.run_metadata)
class MonitoredSessionTest(test.TestCase):
"""MonitoredSession tests."""
  def test_defaults(self):
    """A MonitoredSession with no arguments initializes variables."""
    with ops.Graph().as_default():
      a_var = variables.Variable(0)
      with monitored_session.MonitoredSession() as session:
        self.assertEqual(0, session.run(a_var))
  def test_last_step(self):
    """StopAtStepHook(last_step=N) flips should_stop once step N is reached."""
    logdir = _test_dir(self.get_temp_dir(), 'test_last_step')
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      # Run till step 3 and save.
      hooks = [basic_session_run_hooks.StopAtStepHook(last_step=3)]
      # NOTE(review): this scaffold is not passed to the session below; only
      # its saver is used to write the checkpoint.
      scaffold = monitored_session.Scaffold().finalize()
      with monitored_session.MonitoredSession(hooks=hooks) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertFalse(session.should_stop())
        self.assertEqual(1, session.run(do_step))
        self.assertFalse(session.should_stop())
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
        self.assertEqual(3, session.run(do_step))
        self.assertTrue(session.should_stop())
        save_path = scaffold.saver.save(session._coordinated_creator.tf_sess,
                                        os.path.join(logdir, 'step-3'))
      # Run till step 5 and save.
      def load_ckpt(scaffold, sess):
        # Restore from the step-3 checkpoint written above.
        scaffold.saver.restore(sess, save_path)
      session_creator = monitored_session.ChiefSessionCreator(
          monitored_session.Scaffold(init_fn=load_ckpt))
      hooks = [basic_session_run_hooks.StopAtStepHook(last_step=5)]
      with monitored_session.MonitoredSession(
          hooks=hooks, session_creator=session_creator) as session:
        self.assertEqual(3, session.run(gstep))
        self.assertFalse(session.should_stop())
        self.assertEqual(4, session.run(do_step))
        self.assertFalse(session.should_stop())
        self.assertEqual(5, session.run(do_step))
        self.assertTrue(session.should_stop())
  def test_num_steps(self):
    """StopAtStepHook(num_steps=N) stops after N additional steps."""
    logdir = _test_dir(self.get_temp_dir(), 'test_num_steps')
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      # Do 3 steps and save.
      hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=3)]
      # NOTE(review): this scaffold is not passed to the session below; only
      # its saver is used to write the checkpoint.
      scaffold = monitored_session.Scaffold().finalize()
      with monitored_session.MonitoredSession(hooks=hooks) as session:
        session.run(do_step)
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertTrue(session.should_stop())
        save_path = scaffold.saver.save(session._coordinated_creator.tf_sess,
                                        os.path.join(logdir, 'step-3'))
      # Restore and do 4 steps.
      def load_ckpt(scaffold, sess):
        # Restore from the step-3 checkpoint written above.
        scaffold.saver.restore(sess, save_path)
      session_creator = monitored_session.ChiefSessionCreator(
          scaffold=monitored_session.Scaffold(init_fn=load_ckpt))
      hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=4)]
      with monitored_session.MonitoredSession(
          hooks=hooks, session_creator=session_creator) as session:
        self.assertEqual(4, session.run(do_step))
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertTrue(session.should_stop())
  # This set of tests verifies the monitored session behavior when exceptions
  # are raised next to the innermost session run() call.
  def test_recovery(self):
    """Checkpoints written by a hook are found and restored on restart."""
    logdir = _test_dir(self.get_temp_dir(), 'test_recovery')
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      scaffold = monitored_session.Scaffold()
      # Use a hook to save the model every step. It also saves it at
      # the end.
      hooks = [
          basic_session_run_hooks.CheckpointSaverHook(
              logdir, save_steps=1, scaffold=scaffold)
      ]
      with monitored_session.MonitoredSession(
          session_creator=monitored_session.ChiefSessionCreator(
              scaffold, checkpoint_dir=logdir),
          hooks=hooks) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
      # A restart will find the checkpoint and recover automatically.
      with monitored_session.MonitoredSession(
          session_creator=monitored_session.ChiefSessionCreator(
              scaffold, checkpoint_dir=logdir)) as session:
        self.assertEqual(2, session.run(gstep))
      # A restart will find the checkpoint and recover automatically.
      with monitored_session.MonitoredSession(
          session_creator=monitored_session.ChiefSessionCreator(
              scaffold,
              checkpoint_filename_with_path=saver_lib.latest_checkpoint(
                  logdir))) as session:
        self.assertEqual(2, session.run(gstep))
  def test_retry_initialization_on_aborted_error(self):
    """Initialization that raises AbortedError once is silently retried."""
    # Tests that we silently retry on abort during initialization.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      self.init_raised_aborted_error = False
      def _init_fn(scaffold, session):
        # Abort only on the first initialization attempt.
        _, _ = scaffold, session
        if not self.init_raised_aborted_error:
          self.init_raised_aborted_error = True
          raise errors_impl.AbortedError(None, None, 'Abort')
      with monitored_session.MonitoredSession(
          session_creator=monitored_session.ChiefSessionCreator(
              scaffold=monitored_session.Scaffold(
                  init_fn=_init_fn))) as session:
        self.assertFalse(session.should_stop())
        self.assertEqual(0, session.run(gstep))
      self.assertTrue(self.init_raised_aborted_error)
  def _retry_test(self, ex):
    """Runs a session with a hook that raises `ex` once and verifies retry."""
    # Tests that we silently retry on error. Note that this does not test
    # recovery as we do not use a CheckpointSaver in this test.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(4, ex)
      with monitored_session.MonitoredSession(hooks=[hook]) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
        # Here at step 3, the hook triggers and raises AbortedError. The
        # MonitoredSession automatically retries and restart from a freshly
        # initialized session, so the step is back to 0 and running do_step
        # moves it to 1.
        self.assertEqual(1, session.run(do_step))
        self.assertFalse(session.should_stop())
        self.assertTrue(hook.raised)
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
  def test_retry_on_aborted_error(self):
    """AbortedError raised by a hook is silently retried."""
    self._retry_test(errors_impl.AbortedError(None, None, 'Abort'))

  def test_retry_on_unavailable_error(self):
    """UnavailableError raised by a hook is silently retried."""
    self._retry_test(errors_impl.UnavailableError(None, None, 'Unavailable'))
  def test_recover_and_retry_on_aborted_error(self):
    """Abort with a CheckpointSaver present: state is restored, not reset."""
    # Tests that we silently retry and recover on abort. This test uses
    # a CheckpointSaver to have something to recover from.
    logdir = _test_dir(self.get_temp_dir(),
                       'test_recover_and_retry_on_aborted_error')
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      scaffold = monitored_session.Scaffold()
      abort_hook = RaiseOnceAtCountN(
          4, errors_impl.AbortedError(None, None, 'Abort'))
      # Save after each step.
      ckpt_hook = basic_session_run_hooks.CheckpointSaverHook(
          logdir, save_steps=1, scaffold=scaffold)
      hooks = [abort_hook, ckpt_hook]
      with monitored_session.MonitoredSession(
          session_creator=monitored_session.ChiefSessionCreator(
              scaffold, checkpoint_dir=logdir),
          hooks=hooks) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
        # Here at step 3, the hook triggers and raises AbortedError. The
        # MonitoredSession automatically restores and retries.
        self.assertEqual(3, session.run(do_step))
        self.assertTrue(abort_hook.raised)
        self.assertFalse(session.should_stop())
        self.assertEqual(4, session.run(do_step))
        self.assertFalse(session.should_stop())
def test_exit_cleanly_on_out_of_range_exception(self):
# Tests that we stop cleanly when OutOfRange is raised.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
'EOI'))
session = monitored_session.MonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises OutOfRange. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_exit_cleanly_on_stop_iteration_exception(self):
# Tests that we stop cleanly when OutOfRange is raised.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, StopIteration)
session = monitored_session.MonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises StopIteration. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_regular_exception_pass_through_run(self):
# Tests that regular exceptions just pass through a "with
# MonitoredSession" block and set the session in stop mode.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(4, RuntimeError('regular exception'))
session = monitored_session.MonitoredSession(hooks=[hook])
with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
with session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# This triggers the hook and raises the exception
session.run(do_step)
# We should not hit this
self.assertFalse(True)
self.assertTrue(hook.raised)
self.assertTrue(session.should_stop())
  def test_regular_exception_reported_to_coord_pass_through_run(self):
    """A coordinator-reported exception does not break subsequent run() calls."""
    # Tests that regular exceptions reported to the coordinator from a thread
    # passes through a "run()" call within a "with MonitoredSession" block and
    # set the session in stop mode.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      session = monitored_session.MonitoredSession()
      run_performed_without_error = False
      with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
        with session:
          self.assertEqual(0, session.run(gstep))
          # Report an exception through the coordinator.
          try:
            raise RuntimeError('a thread wants to stop')
          except RuntimeError as e:
            session._coordinated_creator.coord.request_stop(e)
          # Call run() which should perform normally.
          self.assertEqual(0, session.run(gstep))
          run_performed_without_error = True
      # The reported exception surfaces when leaving the with-block, not
      # during the run() above.
      self.assertTrue(run_performed_without_error)
  def test_regular_exception_reported_to_coord_pass_through_return(self):
    """A coordinator-reported exception surfaces when the with-block returns."""
    # Tests that regular exceptions reported to the coordinator from a thread
    # passes through returning from a "with MonitoredSession" block and
    # set the session in stop mode.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      session = monitored_session.MonitoredSession()
      with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
        with session:
          self.assertEqual(0, session.run(gstep))
          # Report an exception through the coordinator.
          try:
            raise RuntimeError('a thread wants to stop')
          except RuntimeError as e:
            session._coordinated_creator.coord.request_stop(e)
      self.assertTrue(session.should_stop())
  # This set of tests verifies the session behavior when exceptions are raised
  # from code inside a "with MonitoredSession:" context.
  def test_stop_cleanly_when_no_exception_in_with_body(self):
    """Leaving the with-block normally closes the session and sets stop mode."""
    # NOTE(review): the original comment said "regular exceptions pass
    # through", but no exception is raised here; this checks the clean path.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      session = monitored_session.MonitoredSession()
      with session:
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
      # Should have closed.
      self.assertTrue(session.should_stop())
      self.assertTrue(session._is_closed())
  def test_raises_regular_exceptions_in_with_body(self):
    """An exception raised directly in the with-body is visible to the caller."""
    # Tests that regular exceptions in "with body" are seen outside.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      session = monitored_session.MonitoredSession()
      # We should see that exception.
      with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
        with session:
          self.assertEqual(1, session.run(do_step))
          self.assertEqual(2, session.run(do_step))
          self.assertFalse(session.should_stop())
          # Will be visible outside the "with body".
          raise RuntimeError('regular exception')
      # Should have closed.
      self.assertTrue(session.should_stop())
      self.assertTrue(session._is_closed())
  def test_graph(self):
    """session.graph exposes the default graph the session was built in."""
    with ops.Graph().as_default() as g:
      with monitored_session.MonitoredSession() as session:
        self.assertEqual(g, session.graph)
  def test_graph_finalized_during_run_unfinalized_after_exit(self):
    """The graph is finalized while the session runs and unfinalized on exit."""
    with ops.Graph().as_default() as g:
      a_var = variables.Variable(0)
      with monitored_session.MonitoredSession() as session:
        self.assertEqual(0, session.run(a_var))
        # Finalized inside the session context...
        self.assertTrue(g.finalized)
      # ...and unfinalized again after leaving it.
      self.assertFalse(g.finalized)
  def test_keep_finalized_graph_as_finalized(self):
    """A graph finalized before the session stays finalized after exit."""
    with ops.Graph().as_default() as g:
      a_var = variables.Variable(0)
      # Scaffold.finalize() finalizes the graph up front.
      monitored_session.Scaffold().finalize()
      with monitored_session.MonitoredSession() as session:
        self.assertEqual(0, session.run(a_var))
        self.assertTrue(g.finalized)
      # Unlike the default case, the session must not unfinalize it on exit.
      self.assertTrue(g.finalized)
  def test_merge_run_options_from_hooks(self):
    """Test for rewriting RunOptions and observing RunMetadata with hooks."""
    with ops.Graph().as_default():
      my_const = constant_op.constant(42, name='my_const')
      _ = constant_op.constant(24, name='my_const_2')
      # Two hooks each request a different debug watch plus conflicting
      # RunOptions fields; the merged result is asserted below.
      watch_a = debug_pb2.DebugTensorWatch(
          node_name='my_const',
          output_slot=0,
          debug_ops=['DebugIdentity'],
          debug_urls=[])
      hook_a = RunOptionsMetadataHook(2, 30000, False, watch_a)
      watch_b = debug_pb2.DebugTensorWatch(
          node_name='my_const_2',
          output_slot=0,
          debug_ops=['DebugIdentity'],
          debug_urls=[])
      hook_b = RunOptionsMetadataHook(3, 60000, True, watch_b)
      with monitored_session.MonitoredSession(
          hooks=[hook_a, hook_b]) as session:
        self.assertEqual(42, session.run(my_const))
        # trace_level=3 should have overridden trace_level=2;
        # timeout_in_ms=60000 should have overridden 30000;
        # output_partition_graphs=True should have overridden False.
        # The two debug tensor watches should have been merged.
        self.assertEqual(
            [
                config_pb2.RunOptions(
                    trace_level=3,
                    timeout_in_ms=60000,
                    output_partition_graphs=True,
                    debug_options=debug_pb2.DebugOptions(
                        debug_tensor_watch_opts=[watch_a, watch_b]))
            ],
            hook_b.run_options_list)
        self.assertEqual(1, len(hook_b.run_metadata_list))
        self.assertTrue(
            isinstance(hook_b.run_metadata_list[0], config_pb2.RunMetadata))
        self.assertGreater(len(hook_b.run_metadata_list[0].partition_graphs), 0)
  def test_merge_caller_and_hook_run_options(self):
    """Test that RunOptions from caller and hooks can be merged properly."""
    with ops.Graph().as_default():
      my_const = constant_op.constant(42, name='my_const')
      _ = constant_op.constant(24, name='my_const_2')
      # The hook asks to watch my_const_2; the caller asks to watch my_const.
      hook_watch = debug_pb2.DebugTensorWatch(
          node_name='my_const_2',
          output_slot=0,
          debug_ops=['DebugIdentity'],
          debug_urls=[])
      hook = RunOptionsMetadataHook(2, 60000, False, hook_watch)
      with monitored_session.MonitoredSession(hooks=[hook]) as session:
        caller_watch = debug_pb2.DebugTensorWatch(
            node_name='my_const',
            output_slot=0,
            debug_ops=['DebugIdentity'],
            debug_urls=[])
        caller_options = config_pb2.RunOptions(
            trace_level=3, timeout_in_ms=30000, output_partition_graphs=True)
        caller_options.debug_options.debug_tensor_watch_opts.extend(
            [caller_watch])
        self.assertEqual(42, session.run(my_const, options=caller_options))
        # trace_level=3 from the caller should override 2 from the hook.
        # timeout_in_ms=60000 from the hook should override from the caller.
        # output_partition_graph=True from the caller should override False
        # from the hook.
        # The two debug watches from the caller and the hook should be merged,
        # in that order.
        self.assertEqual(
            [
                config_pb2.RunOptions(
                    trace_level=3,
                    timeout_in_ms=60000,
                    output_partition_graphs=True,
                    debug_options=debug_pb2.DebugOptions(
                        debug_tensor_watch_opts=[caller_watch, hook_watch]))
            ],
            hook.run_options_list)
        self.assertEqual(1, len(hook.run_metadata_list))
        self.assertTrue(
            isinstance(hook.run_metadata_list[0], config_pb2.RunMetadata))
        self.assertGreater(len(hook.run_metadata_list[0].partition_graphs), 0)
  def test_with_statement_and_close(self):
    """Calling close() inside the with-block raises a descriptive error."""
    # Test case for https://github.com/tensorflow/tensorflow/issues/12224
    # where close() inside the with should have a better error message.
    with self.assertRaisesRegexp(RuntimeError, 'Session is already closed'):
      with monitored_session.MonitoredSession() as session:
        session.close()
class SingularMonitoredSessionTest(test.TestCase):
  """Tests SingularMonitoredSession."""

  def test_handles_initialization(self):
    """Variables are initialized automatically on session creation."""
    with ops.Graph().as_default():
      a_var = variables.Variable(0)
      with monitored_session.SingularMonitoredSession() as session:
        # If it's not initialized, following statement raises an error.
        self.assertEqual(0, session.run(a_var))

  def test_do_not_handle_aborted_error(self):
    """Unlike MonitoredSession, AbortedError is NOT retried here."""
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()

      class _RaiseAbortedHook(session_run_hook.SessionRunHook):

        def before_run(self, run_context):
          raise errors_impl.AbortedError(None, None, 'Abort')

      with monitored_session.SingularMonitoredSession(
          hooks=[_RaiseAbortedHook()]) as session:
        with self.assertRaises(errors_impl.AbortedError):
          self.assertEqual(0, session.run(gstep))
      with self.assertRaises(errors_impl.AbortedError):
        with monitored_session.SingularMonitoredSession(
            hooks=[_RaiseAbortedHook()]) as session:
          self.assertEqual(0, session.run(gstep))

  def test_exit_cleanly_on_out_of_range_exception(self):
    """OutOfRangeError is swallowed; the session exits cleanly and stops."""
    # Tests that we stop cleanly when OutOfRange is raised.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(
          2, errors_impl.OutOfRangeError(None, None, 'EOI'))
      session = monitored_session.SingularMonitoredSession(hooks=[hook])
      # session should cleanly exit from the context.
      with session:
        self.assertEqual(0, session.run(gstep))
        self.assertFalse(session.should_stop())
        # Here at step 1, the hook triggers and raises OutOfRange. The
        # session should go into should_stop() mode. It should raise the
        # exception. So next step should not be executed.
        session.run(do_step)
        # self.fail (instead of assertTrue(False)) gives a clear message
        # if control unexpectedly reaches this point.
        self.fail('OutOfRangeError should have been raised by run() above.')
      self.assertTrue(session.should_stop())

  def test_regular_exception_reported_to_coord_pass_through_run(self):
    """A coordinator-reported exception does not break subsequent run() calls."""
    # Tests that regular exceptions reported to the coordinator from a thread
    # passes through a "run()" call within a "with MonitoredSession" block and
    # set the session in stop mode.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      session = monitored_session.SingularMonitoredSession()
      run_performed_without_error = False
      with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
        with session:
          self.assertEqual(0, session.run(gstep))
          # Report an exception through the coordinator.
          try:
            raise RuntimeError('a thread wants to stop')
          except RuntimeError as e:
            session._coordinated_creator.coord.request_stop(e)
          # Call run() which should perform normally.
          self.assertEqual(0, session.run(gstep))
          run_performed_without_error = True
      self.assertTrue(run_performed_without_error)

  def test_stop_cleanly_when_no_exception_in_with_body(self):
    """Leaving the with-block normally closes the underlying raw session."""
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      session = monitored_session.SingularMonitoredSession()
      with session:
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
      # Should have closed.
      self.assertTrue(session.should_stop())
      self.assertEqual(None, session.raw_session())

  def test_graph(self):
    """session.graph exposes the default graph the session was built in."""
    with ops.Graph().as_default() as g:
      with monitored_session.SingularMonitoredSession() as session:
        self.assertEqual(g, session.graph)

  def test_raw_session(self):
    """raw_session() exposes the underlying tf.Session while open."""
    with ops.Graph().as_default():
      with monitored_session.SingularMonitoredSession() as session:
        self.assertTrue(isinstance(session.raw_session(), session_lib.Session))
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  test.main()
| |
"""
Fenced Code Extension for Python Markdown
=========================================
This extension adds Fenced Code Blocks to Python-Markdown.
>>> import markdown
>>> text = '''
... A paragraph before a fenced code block:
...
... ~~~
... Fenced code block
... ~~~
... '''
>>> html = markdown.markdown(text, extensions=['fenced_code'])
>>> print html
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Works with safe_mode also (we check this because we are using the HtmlStash):
>>> print markdown.markdown(text, extensions=['fenced_code'], safe_mode='replace')
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Include tilde's in a code block and wrap with blank lines:
>>> text = '''
... ~~~~~~~~
...
... ~~~~
... ~~~~~~~~'''
>>> print markdown.markdown(text, extensions=['fenced_code'])
<pre><code>
~~~~
</code></pre>
Removes trailing whitespace from code blocks that cause horizontal scrolling
>>> import markdown
>>> text = '''
... A paragraph before a fenced code block:
...
... ~~~
... Fenced code block \t\t\t\t\t\t\t
... ~~~
... '''
>>> html = markdown.markdown(text, extensions=['fenced_code'])
>>> print html
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Language tags:
>>> text = '''
... ~~~~{.python}
... # Some python code
... ~~~~'''
>>> print markdown.markdown(text, extensions=['fenced_code'])
<pre><code class="python"># Some python code
</code></pre>
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://packages.python.org/Markdown/extensions/fenced_code_blocks.html>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.4+](http://python.org)
* [Markdown 2.0+](http://packages.python.org/Markdown/)
* [Pygments (optional)](http://pygments.org)
"""
import re
import subprocess
import markdown
from django.utils.html import escape
from markdown.extensions.codehilite import CodeHilite, CodeHiliteExtension
from zerver.lib.str_utils import force_bytes
from zerver.lib.tex import render_tex
from typing import Any, Dict, Iterable, List, MutableSequence, Optional, Tuple, Union, Text
# Global vars
# Matches an opening/closing fence line: three or more tildes or backticks,
# optionally followed by a language tag like "py", ".py" or "{.python}".
# NOTE: the hyphen inside the character class is escaped; the previous
# unescaped form "+-." was a character *range* that accidentally accepted
# "," as part of a language name.
FENCE_RE = re.compile(u"""
    # ~~~ or ```
    (?P<fence>
        ^(?:~{3,}|`{3,})
    )
    [ ]*         # spaces
    (
        \\{?\\.?
        (?P<lang>
            [a-zA-Z0-9_+\\-./#]*
        )            # "py" or "javascript"
        \\}?
    )                # language, like ".py" or "{javascript}"
    [ ]*         # spaces
    $
    """, re.VERBOSE)
# Fallback markup used when the codehilite extension is not active.
CODE_WRAP = u'<pre><code%s>%s\n</code></pre>'
LANG_TAG = u' class="%s"'
class FencedCodeExtension(markdown.Extension):
    def extendMarkdown(self, md, md_globals):
        # type: (markdown.Markdown, Dict[str, Any]) -> None
        """ Add FencedBlockPreprocessor to the Markdown instance. """
        md.registerExtension(self)
        # Newer versions of Python-Markdown (starting at 2.3?) ship a
        # normalize_whitespace preprocessor; when present, ours must run
        # right after it, otherwise at the very beginning of the chain.
        if 'normalize_whitespace' in md.preprocessors:
            insert_at = '>normalize_whitespace'
        else:
            insert_at = '_begin'
        preprocessor = FencedBlockPreprocessor(md)
        md.preprocessors.add('fenced_code_block', preprocessor, insert_at)
class FencedBlockPreprocessor(markdown.preprocessors.Preprocessor):
    """Preprocessor that finds fenced blocks (code, quote, tex) and replaces
    them with placeholders stored in the Markdown HtmlStash.

    Parsing uses a stack of handlers so that fences can nest (e.g. a code
    fence inside a quote fence).
    """
    def __init__(self, md):
        # type: (markdown.Markdown) -> None
        markdown.preprocessors.Preprocessor.__init__(self, md)
        # Lazily resolved on first format_code() call; see format_code below.
        self.checked_for_codehilite = False
        self.codehilite_conf = {} # type: Dict[str, List[Any]]
    def run(self, lines):
        # type: (Iterable[Text]) -> List[Text]
        """ Match and store Fenced Code Blocks in the HtmlStash. """
        output = [] # type: List[Text]
        # Abstract interface for the handler stack below.
        class BaseHandler(object):
            def handle_line(self, line):
                # type: (Text) -> None
                raise NotImplementedError()
            def done(self):
                # type: () -> None
                raise NotImplementedError()
        # Alias so the nested classes can reach the preprocessor instance.
        processor = self
        # Stack of active handlers; the innermost (top) handler consumes lines.
        handlers = [] # type: List[BaseHandler]
        def push(handler):
            # type: (BaseHandler) -> None
            handlers.append(handler)
        def pop():
            # type: () -> None
            handlers.pop()
        def check_for_new_fence(output, line):
            # type: (MutableSequence[Text], Text) -> None
            # If the line opens a fence, push a handler for it; otherwise the
            # line is ordinary content and is passed through.
            m = FENCE_RE.match(line)
            if m:
                fence = m.group('fence')
                lang = m.group('lang')
                handler = generic_handler(output, fence, lang)
                push(handler)
            else:
                output.append(line)
        class OuterHandler(BaseHandler):
            # Top-level handler: no enclosing fence; just scans for fences.
            def __init__(self, output):
                # type: (MutableSequence[Text]) -> None
                self.output = output
            def handle_line(self, line):
                # type: (Text) -> None
                check_for_new_fence(self.output, line)
            def done(self):
                # type: () -> None
                pop()
        def generic_handler(output, fence, lang):
            # type: (MutableSequence[Text], Text, Text) -> BaseHandler
            # Dispatch on the fence's language tag.
            if lang in ('quote', 'quoted'):
                return QuoteHandler(output, fence)
            elif lang in ('math', 'tex', 'latex'):
                return TexHandler(output, fence)
            else:
                return CodeHandler(output, fence, lang)
        class CodeHandler(BaseHandler):
            # Collects verbatim lines until the matching closing fence.
            def __init__(self, output, fence, lang):
                # type: (MutableSequence[Text], Text, Text) -> None
                self.output = output
                self.fence = fence
                self.lang = lang
                self.lines = [] # type: List[Text]
            def handle_line(self, line):
                # type: (Text) -> None
                if line.rstrip() == self.fence:
                    self.done()
                else:
                    self.lines.append(line.rstrip())
            def done(self):
                # type: () -> None
                text = '\n'.join(self.lines)
                text = processor.format_code(self.lang, text)
                # Stash the rendered HTML; the placeholder survives the rest
                # of the Markdown pipeline untouched.
                text = processor.placeholder(text)
                processed_lines = text.split('\n')
                self.output.append('')
                self.output.extend(processed_lines)
                self.output.append('')
                pop()
        class QuoteHandler(BaseHandler):
            # Quote fences may contain nested fences, so each line is
            # re-scanned via check_for_new_fence.
            def __init__(self, output, fence):
                # type: (MutableSequence[Text], Text) -> None
                self.output = output
                self.fence = fence
                self.lines = [] # type: List[Text]
            def handle_line(self, line):
                # type: (Text) -> None
                if line.rstrip() == self.fence:
                    self.done()
                else:
                    check_for_new_fence(self.lines, line)
            def done(self):
                # type: () -> None
                text = '\n'.join(self.lines)
                text = processor.format_quote(text)
                processed_lines = text.split('\n')
                self.output.append('')
                self.output.extend(processed_lines)
                self.output.append('')
                pop()
        class TexHandler(BaseHandler):
            # Renders the fenced content as TeX/KaTeX HTML.
            def __init__(self, output, fence):
                # type: (MutableSequence[Text], Text) -> None
                self.output = output
                self.fence = fence
                self.lines = [] # type: List[Text]
            def handle_line(self, line):
                # type: (Text) -> None
                if line.rstrip() == self.fence:
                    self.done()
                else:
                    check_for_new_fence(self.lines, line)
            def done(self):
                # type: () -> None
                text = '\n'.join(self.lines)
                text = processor.format_tex(text)
                text = processor.placeholder(text)
                processed_lines = text.split('\n')
                self.output.append('')
                self.output.extend(processed_lines)
                self.output.append('')
                pop()
        handler = OuterHandler(output)
        push(handler)
        for line in lines:
            handlers[-1].handle_line(line)
        # Input ended: close any fences left open (unterminated fences are
        # treated as if closed at EOF).
        while handlers:
            handlers[-1].done()
        # This fiddly handling of new lines at the end of our output was done to make
        # existing tests pass. Bugdown is just kind of funny when it comes to new lines,
        # but we could probably remove this hack.
        if len(output) > 2 and output[-2] != '':
            output.append('')
        return output
    def format_code(self, lang, text):
        # type: (Text, Text) -> Text
        """Render a code block, via CodeHilite when that extension is active,
        otherwise as an escaped <pre><code> wrapper."""
        if lang:
            langclass = LANG_TAG % (lang,)
        else:
            langclass = ''
        # Check for code hilite extension
        if not self.checked_for_codehilite:
            for ext in self.markdown.registeredExtensions:
                if isinstance(ext, CodeHiliteExtension):
                    self.codehilite_conf = ext.config
                    break
            self.checked_for_codehilite = True
        # If config is not empty, then the codehighlite extension
        # is enabled, so we call it to highlite the code
        if self.codehilite_conf:
            highliter = CodeHilite(text,
                                   linenums=self.codehilite_conf['linenums'][0],
                                   guess_lang=self.codehilite_conf['guess_lang'][0],
                                   css_class=self.codehilite_conf['css_class'][0],
                                   style=self.codehilite_conf['pygments_style'][0],
                                   use_pygments=self.codehilite_conf['use_pygments'][0],
                                   lang=(lang or None),
                                   noclasses=self.codehilite_conf['noclasses'][0])
            code = highliter.hilite()
        else:
            code = CODE_WRAP % (langclass, self._escape(text))
        return code
    def format_quote(self, text):
        # type: (Text) -> Text
        """Prefix each non-empty line with "> ", preserving paragraph breaks."""
        paragraphs = text.split("\n\n")
        quoted_paragraphs = []
        for paragraph in paragraphs:
            lines = paragraph.split("\n")
            quoted_paragraphs.append("\n".join("> " + line for line in lines if line != ''))
        return "\n\n".join(quoted_paragraphs)
    def format_tex(self, text):
        # type: (Text) -> Text
        """Render each paragraph as TeX; fall back to an escaped error span
        when rendering fails."""
        paragraphs = text.split("\n\n")
        tex_paragraphs = []
        for paragraph in paragraphs:
            html = render_tex(paragraph, is_inline=False)
            if html is not None:
                tex_paragraphs.append(html)
            else:
                tex_paragraphs.append('<span class="tex-error">' +
                                      escape(paragraph) + '</span>')
        return "\n\n".join(tex_paragraphs)
    def placeholder(self, code):
        # type: (Text) -> Text
        # Returns a stash placeholder token that later stages expand back
        # into the stored HTML.
        return self.markdown.htmlStash.store(code, safe=True)
    def _escape(self, txt):
        # type: (Text) -> Text
        """ basic html escaping """
        txt = txt.replace('&', '&amp;')
        txt = txt.replace('<', '&lt;')
        txt = txt.replace('>', '&gt;')
        txt = txt.replace('"', '&quot;')
        return txt
def makeExtension(*args, **kwargs):
    # type: (*Any, **Union[bool, None, Text]) -> FencedCodeExtension
    """Entry point used by Python-Markdown to instantiate this extension."""
    extension = FencedCodeExtension(*args, **kwargs)
    return extension
if __name__ == "__main__":
import doctest
doctest.testmod()
| |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from jsonpath_rw import parse
from st2common import log as logging
import st2common.operators as criteria_operators
from st2common.constants.rules import TRIGGER_PAYLOAD_PREFIX, RULE_TYPE_BACKSTOP
from st2common.constants.keyvalue import SYSTEM_SCOPE
from st2common.services.keyvalues import KeyValueLookup
from st2common.util.templating import render_template_with_system_context
LOG = logging.getLogger('st2reactor.ruleenforcement.filter')
class RuleFilter(object):
    def __init__(self, trigger_instance, trigger, rule, extra_info=False):
        """
        :param trigger_instance: TriggerInstance DB object.
        :type trigger_instance: :class:`TriggerInstanceDB``

        :param trigger: Trigger DB object.
        :type trigger: :class:`TriggerDB`

        :param rule: Rule DB object.
        :type rule: :class:`RuleDB`

        :param extra_info: True to log additional criteria details on failure.
        :type extra_info: ``bool``
        """
        self.trigger_instance = trigger_instance
        self.trigger = trigger
        self.rule = rule
        self.extra_info = extra_info

        # Base context used with a logger
        self._base_logger_context = {
            'rule': self.rule,
            'trigger': self.trigger,
            'trigger_instance': self.trigger_instance
        }

    def filter(self):
        """
        Return true if the rule is applicable to the provided trigger instance.

        :rtype: ``bool``
        """
        LOG.info('Validating rule %s for %s.', self.rule.ref, self.trigger['name'],
                 extra=self._base_logger_context)

        if not self.rule.enabled:
            if self.extra_info:
                LOG.info('Validation failed for rule %s as it is disabled.', self.rule.ref)
            return False

        criteria = self.rule.criteria
        is_rule_applicable = True

        # A rule with criteria can never match an empty payload.
        if criteria and not self.trigger_instance.payload:
            return False

        payload_lookup = PayloadLookup(self.trigger_instance.payload)

        LOG.debug('Trigger payload: %s', self.trigger_instance.payload,
                  extra=self._base_logger_context)

        for criterion_k in criteria.keys():
            criterion_v = criteria[criterion_k]
            is_rule_applicable, payload_value, criterion_pattern = self._check_criterion(
                criterion_k, criterion_v, payload_lookup)
            if not is_rule_applicable:
                if self.extra_info:
                    # Use .get() - the criterion may have failed precisely
                    # because "type" is missing.
                    criteria_extra_info = '\n'.join([
                        '  key: %s' % criterion_k,
                        '  pattern: %s' % criterion_pattern,
                        '  type: %s' % criterion_v.get('type', None),
                        '  payload: %s' % payload_value
                    ])
                    LOG.info('Validation for rule %s failed on criteria -\n%s', self.rule.ref,
                             criteria_extra_info,
                             extra=self._base_logger_context)
                break

        if not is_rule_applicable:
            LOG.debug('Rule %s not applicable for %s.', self.rule.id, self.trigger['name'],
                      extra=self._base_logger_context)

        return is_rule_applicable

    def _check_criterion(self, criterion_k, criterion_v, payload_lookup):
        """
        Evaluate a single criterion against the trigger payload.

        Always returns a 3-tuple so the caller can unpack it safely.
        (The previous implementation returned a bare ``False`` on error
        paths, which raised TypeError in the caller's tuple unpacking.)

        :rtype: ``tuple`` of (matched, payload_value, rendered_pattern)
        """
        if 'type' not in criterion_v:
            # Comparison operator type not specified, can't perform a comparison
            return False, None, None

        criteria_operator = criterion_v['type']
        criteria_pattern = criterion_v.get('pattern', None)

        # Render the pattern (it can contain a jinja expressions)
        try:
            criteria_pattern = self._render_criteria_pattern(criteria_pattern=criteria_pattern)
        except Exception:
            LOG.exception('Failed to render pattern value "%s" for key "%s"' %
                          (criteria_pattern, criterion_k), extra=self._base_logger_context)
            return False, None, criteria_pattern

        try:
            matches = payload_lookup.get_value(criterion_k)
            # pick value if only 1 matches else will end up being an array match.
            if matches:
                payload_value = matches[0] if len(matches) > 0 else matches
            else:
                payload_value = None
        except Exception:
            LOG.exception('Failed transforming criteria key %s', criterion_k,
                          extra=self._base_logger_context)
            return False, None, criteria_pattern

        op_func = criteria_operators.get_operator(criteria_operator)

        try:
            result = op_func(value=payload_value, criteria_pattern=criteria_pattern)
        except Exception:
            LOG.exception('There might be a problem with the criteria in rule %s.', self.rule,
                          extra=self._base_logger_context)
            return False, payload_value, criteria_pattern

        return result, payload_value, criteria_pattern

    def _render_criteria_pattern(self, criteria_pattern):
        """Render a criteria pattern with the system (datastore) context."""
        # Note: Here we want to use strict comparison to None to make sure that
        # other falsy values such as integer 0 are handled correctly.
        if criteria_pattern is None:
            return None

        if not isinstance(criteria_pattern, six.string_types):
            # We only perform rendering if value is a string - rendering a non-string value
            # makes no sense
            return criteria_pattern

        criteria_pattern = render_template_with_system_context(value=criteria_pattern)
        return criteria_pattern
class SecondPassRuleFilter(RuleFilter):
    """
    Rule filter applied during the second evaluation pass. Currently this only
    covers backstop rules, i.e. rules which should match only when no other
    rule matched during the first pass.
    """
    def __init__(self, trigger_instance, trigger, rule, first_pass_matched):
        """
        :param trigger_instance: TriggerInstance DB object.
        :type trigger_instance: :class:`TriggerInstanceDB``

        :param trigger: Trigger DB object.
        :type trigger: :class:`TriggerDB`

        :param rule: Rule DB object.
        :type rule: :class:`RuleDB`

        :param first_pass_matched: Rules that matched in the first pass.
        :type first_pass_matched: `list`
        """
        super(SecondPassRuleFilter, self).__init__(trigger_instance, trigger, rule)
        self.first_pass_matched = first_pass_matched

    def filter(self):
        # Backstop rules are suppressed whenever the first pass already
        # produced a match; everything else goes through the regular logic.
        if self.first_pass_matched and self._is_backstop_rule():
            return False
        return super(SecondPassRuleFilter, self).filter()

    def _is_backstop_rule(self):
        rule_type_ref = self.rule.type['ref']
        return rule_type_ref == RULE_TYPE_BACKSTOP
class PayloadLookup(object):
    """Resolves jsonpath expressions against the trigger payload and the
    system-scoped key-value datastore."""

    def __init__(self, payload):
        context = {
            SYSTEM_SCOPE: KeyValueLookup(scope=SYSTEM_SCOPE),
            TRIGGER_PAYLOAD_PREFIX: payload,
        }
        self._context = context

    def get_value(self, lookup_key):
        """Return the list of matched values, or None when nothing matches."""
        expression = parse(lookup_key)
        found = [m.value for m in expression.find(self._context)]
        return found or None
| |
# -*- coding:utf-8 -*-
# Copyright (c) 2015, Galaxy Authors. All Rights Reserved
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Author: wangtaize@baidu.com
# Date: 2015-04-06
import datetime
import logging
from sofa.pbrpc import client
from galaxy import master_pb2
# Mapping from numeric task state codes to human readable names.
# (Code 1 has no name here -- presumably unused by the master; verify
# against master_pb2 before relying on it.)
STATE_MAP = {
    0: 'DEPLOYING',
    2: 'RUNNING',
    3: 'KILLED',
    4: 'RESTART',
    5: 'ERROR',
    6: 'COMPLETE',
}
LOG = logging.getLogger('console')
class BaseEntity(object):
    """A loose attribute bag: any attribute may be set, and reading an
    attribute that was never set yields None instead of raising."""

    def __setattr__(self, name, value):
        # Store directly in the instance dict; no validation is performed.
        self.__dict__[name] = value

    def __getattr__(self, name):
        # Invoked only when normal attribute lookup fails; default to None.
        return self.__dict__.get(name)
class GalaxySDK(object):
"""
galaxy python sdk
"""
    def __init__(self, master_addr):
        """Create an SDK instance bound to one galaxy master.

        :param master_addr: address of the galaxy master (presumably
            "host:port" as expected by sofa.pbrpc -- TODO confirm)
        """
        self.channel = client.Channel(master_addr)
def list_all_node(self):
"""
list all node of galaxy master
return:
if error ,None will be return
"""
master = master_pb2.Master_Stub(self.channel)
controller = client.Controller()
controller.SetTimeout(1.5)
request = master_pb2.ListNodeRequest()
try:
response = master.ListNode(controller,request)
if not response:
LOG.error('fail to call list node')
return []
ret = []
for node in response.nodes:
base = BaseEntity()
base.id = node.node_id
base.node_id = node.node_id
base.addr = node.addr
base.task_num = node.task_num
base.cpu_share = node.cpu_share
base.mem_share = node.mem_share
base.cpu_allocated = node.cpu_allocated
base.mem_allocated = node.mem_allocated
base.mem_used = node.mem_used
base.cpu_used = node.cpu_used
ret.append(base)
return ret
except:
LOG.exception("fail to call list node")
return []
def make_job(self,name,pkg_type,
pkg_src,boot_cmd,
replicate_num = 1,
mem_limit = 1024,
cpu_limit = 2,
deploy_step_size = -1,
one_task_per_host = False,
restrict_tags = []):
"""
send a new job command to galaxy master
return:
"""
assert name
assert pkg_type
assert pkg_src
assert boot_cmd
req = self._build_new_job_req(name,pkg_type,str(pkg_src),
boot_cmd,
replicate_num = replicate_num,
mem_limit = mem_limit,
cpu_limit = cpu_limit,
deploy_step_size = deploy_step_size,
one_task_per_host = one_task_per_host,
restrict_tags = restrict_tags)
master = master_pb2.Master_Stub(self.channel)
controller = client.Controller()
controller.SetTimeout(1.5)
try:
response = master.NewJob(controller,req)
if not response:
LOG.error("fail to create job")
return False,None
if response.status == 0:
return True,response.job_id
return False,response.job_id
except:
LOG.exception("fail to create job")
return False,None
def tag_agent(self, tag, agent_set):
entity = master_pb2.TagEntity(tag = tag,
agents = agent_set)
request = master_pb2.TagAgentRequest(tag_entity = entity)
master = master_pb2.Master_Stub(self.channel)
controller = client.Controller()
controller.SetTimeout(1.5)
try:
response = master.TagAgent(controller, request)
if response.status == 0 :
return True
return False
except:
LOG.exception("fail to tag agent")
return False
def list_tag(self):
request = master_pb2.ListTagRequest()
master = master_pb2.Master_Stub(self.channel)
controller = client.Controller()
controller.SetTimeout(1.5)
try:
response = master.ListTag(controller, request)
ret = []
for tag in response.tags:
base = BaseEntity()
base.tag = tag.tag
base.agents = [agent for agent in tag.agents]
ret.append(base.__dict__)
return ret
except Exception as e:
LOG.exception("fail to list tag %s"%str(e))
return []
def list_all_job(self):
request = master_pb2.ListJobRequest()
master = master_pb2.Master_Stub(self.channel)
controller = client.Controller()
controller.SetTimeout(1.5)
try:
response = master.ListJob(controller,request)
if not response:
return False,[]
ret = []
for job in response.jobs:
base = BaseEntity()
base.job_id = job.job_id
base.job_name = job.job_name
base.running_task_num = job.running_task_num
base.replica_num = job.replica_num
ret.append(base)
return True,ret
except:
LOG.exception('fail to list jobs')
return False,[]
def update_job(self,id,replicate_num):
req = master_pb2.UpdateJobRequest()
req.job_id = int(id)
req.replica_num = int(replicate_num)
master = master_pb2.Master_Stub(self.channel)
controller = client.Controller()
controller.SetTimeout(1.5)
try:
response = master.UpdateJob(controller,req)
if not response or response.status != 0 :
return False
return True
except:
LOG.exception('fail to update job')
return False
def list_task_by_host(self,host):
req = master_pb2.ListTaskRequest()
req.agent_addr = host
master = master_pb2.Master_Stub(self.channel)
controller = client.Controller()
controller.SetTimeout(1.5)
try:
response = master.ListTask(controller,req)
if not response:
LOG.error('fail to list task %s'%job_id)
return False,[]
ret = []
for task in response.tasks:
base = BaseEntity()
base.id = task.info.task_id
base.status = STATE_MAP[task.status]
base.name = task.info.task_name
base.agent_addr = task.agent_addr
base.job_id = task.job_id
base.offset = task.offset
base.mem_limit = task.info.required_mem
base.cpu_limit = task.info.required_cpu
base.mem_used = task.memory_usage
base.cpu_used = task.cpu_usage
base.start_time = task.start_time
ret.append(base)
return True,ret
except:
LOG.exception('fail to list task')
return False,[]
def list_task_by_job_id(self,job_id):
req = master_pb2.ListTaskRequest()
req.job_id = job_id
master = master_pb2.Master_Stub(self.channel)
controller = client.Controller()
controller.SetTimeout(3.5)
try:
response = master.ListTask(controller,req)
if not response:
LOG.error('fail to list task %s'%job_id)
return False,[]
ret = []
for task in response.tasks:
base = BaseEntity()
base.id = task.info.task_id
base.status = STATE_MAP[task.status]
base.name = task.info.task_name
base.agent_addr = task.agent_addr
base.job_id = task.job_id
base.offset = task.offset
base.mem_limit = task.info.required_mem
base.cpu_limit = task.info.required_cpu
base.mem_used = task.memory_usage
base.cpu_used = task.cpu_usage
base.start_time = task.start_time
ret.append(base)
return True,ret
except:
LOG.exception('fail to list task')
return False,[]
def get_scheduled_history(self,job_id):
req = master_pb2.ListTaskRequest()
req.job_id = job_id
master = master_pb2.Master_Stub(self.channel)
controller = client.Controller()
controller.SetTimeout(3.5)
try:
response = master.ListTask(controller,req)
if not response:
LOG.error('fail to list task %s'%job_id)
return False,[]
ret = []
for task in response.scheduled_tasks:
base = BaseEntity()
base.id = task.info.task_id
base.status = STATE_MAP[task.status]
base.name = task.info.task_name
base.agent_addr = task.agent_addr
base.job_id = task.job_id
base.offset = task.offset
base.mem_limit = task.info.required_mem
base.cpu_limit = task.info.required_cpu
base.mem_used = task.memory_usage
base.cpu_used = task.cpu_usage
base.start_time = task.start_time
base.gc_path = task.root_path
base.end_time = datetime.datetime.fromtimestamp(task.end_time).strftime("%m-%d %H:%M:%S")
ret.append(base)
return True,ret
except:
LOG.exception('fail to list task history')
return False,[]
def kill_job(self,job_id):
req = master_pb2.KillJobRequest()
req.job_id = job_id
master = master_pb2.Master_Stub(self.channel)
controller = client.Controller()
controller.SetTimeout(1.5)
try:
master.KillJob(controller,req)
except:
LOG.exception('fail to kill job')
def _build_new_job_req(self,name,pkg_type,
pkg_src,boot_cmd,
replicate_num = 1,
mem_limit= 1024,
cpu_limit= 2,
deploy_step_size=-1,
one_task_per_host=False,
restrict_tags = []):
req = master_pb2.NewJobRequest(restrict_tags = set(restrict_tags))
if deploy_step_size > 0:
req.deploy_step_size = deploy_step_size
req.job_name = name
req.job_raw = pkg_src
req.cmd_line = boot_cmd
req.cpu_share = cpu_limit
req.mem_share = mem_limit
req.replica_num = replicate_num
req.one_task_per_host = one_task_per_host
return req
| |
#!/usr/bin/env python3
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Process Android resource directories to generate .resources.zip and R.txt
files."""
import argparse
import os
import shutil
import sys
import zipfile
from util import build_utils
from util import jar_info_utils
from util import md5_check
from util import resources_parser
from util import resource_utils
def _ParseArgs(args):
  """Parse command line options.

  Returns:
    An options object as from argparse.ArgumentParser.parse_args()
  """
  parser = argparse.ArgumentParser(description=__doc__)
  build_utils.AddDepfileOption(parser)
  parser.add_argument(
      '--res-sources-path',
      required=True,
      help='Path to a list of input resources for this target.')
  parser.add_argument(
      '--r-text-in',
      help='Path to pre-existing R.txt. Its resource IDs override those found '
      'in the generated R.txt when generating R.java.')
  parser.add_argument(
      '--allow-missing-resources',
      action='store_true',
      help='Do not fail if some resources exist in the res/ dir but are not '
      'listed in the sources.')
  parser.add_argument(
      '--resource-zip-out',
      help='Path to a zip archive containing all resources from '
      '--resource-dirs, merged into a single directory tree.')
  parser.add_argument(
      '--r-text-out', help='Path to store the generated R.txt file.')
  parser.add_argument(
      '--strip-drawables',
      action='store_true',
      help='Remove drawables from the resources.')
  options = parser.parse_args(args)

  # The sources list drives both the input file set and the deduced res/ dirs.
  with open(options.res_sources_path) as sources_file:
    options.sources = sources_file.read().splitlines()
  options.resource_dirs = resource_utils.DeduceResourceDirsFromFileList(
      options.sources)
  return options
def _CheckAllFilesListed(resource_files, resource_dirs):
  """Exit with an error if res/ dirs contain files absent from the sources."""
  listed = set(resource_files)
  unlisted = [
      path
      for path, _ in resource_utils.IterResourceFilesInDirectories(
          resource_dirs) if path not in listed
  ]
  if not unlisted:
    return
  sys.stderr.write('Error: Found files not listed in the sources list of '
                   'the BUILD.gn target:\n')
  for path in unlisted:
    sys.stderr.write('{}\n'.format(path))
  sys.exit(1)
def _ZipResources(resource_dirs, zip_path, ignore_pattern):
  # ignore_pattern is a string of ':' delimited list of globs used to ignore
  # files that should not be part of the final resource zip.
  files_to_zip = []
  path_info = resource_utils.ResourceInfoFile()
  for index, resource_dir in enumerate(resource_dirs):
    # For in-tree resource dirs (not starting with '..'), attribute the files
    # to the originating .aar via the sibling source.info file when present.
    attributed_aar = None
    if not resource_dir.startswith('..'):
      aar_source_info_path = os.path.join(
          os.path.dirname(resource_dir), 'source.info')
      if os.path.exists(aar_source_info_path):
        attributed_aar = jar_info_utils.ReadAarSourceInfo(aar_source_info_path)

    for path, archive_path in resource_utils.IterResourceFilesInDirectories(
        [resource_dir], ignore_pattern):
      attributed_path = path
      if attributed_aar:
        attributed_path = os.path.join(attributed_aar, 'res',
                                       path[len(resource_dir) + 1:])
      # Use the non-prefixed archive_path in the .info file.
      path_info.AddMapping(archive_path, attributed_path)

      # Prefix each zip entry with "<index>_<dirname>/" so files coming from
      # different res/ dirs cannot collide inside the archive.
      resource_dir_name = os.path.basename(resource_dir)
      archive_path = '{}_{}/{}'.format(index, resource_dir_name, archive_path)
      files_to_zip.append((archive_path, path))

  # The .info sidecar must exist alongside the zip output.
  path_info.Write(zip_path + '.info')

  with zipfile.ZipFile(zip_path, 'w') as z:
    # This magic comment signals to resource_utils.ExtractDeps that this zip is
    # not just the contents of a single res dir, without the encapsulating res/
    # (like the outputs of android_generated_resources targets), but instead has
    # the contents of possibly multiple res/ dirs each within an encapsulating
    # directory within the zip.
    z.comment = resource_utils.MULTIPLE_RES_MAGIC_STRING
    build_utils.DoZip(files_to_zip, z)
def _GenerateRTxt(options, r_txt_path):
  """Generate R.txt file.

  Args:
    options: The command-line options tuple.
    r_txt_path: Locates where the R.txt file goes.
  """
  # Build the ':'-delimited glob list, optionally dropping drawables.
  patterns = [resource_utils.AAPT_IGNORE_PATTERN]
  if options.strip_drawables:
    patterns.append('*drawable*')
  generator = resources_parser.RTxtGenerator(options.resource_dirs,
                                             ':'.join(patterns))
  generator.WriteRTxtFile(r_txt_path)
def _OnStaleMd5(options):
  """Regenerate the outputs (R.txt and/or resources zip) from scratch."""
  with resource_utils.BuildContext() as build:
    if options.sources and not options.allow_missing_resources:
      _CheckAllFilesListed(options.sources, options.resource_dirs)

    # Prefer an externally supplied R.txt; otherwise generate our own into
    # the temporary build context.
    if options.r_text_in:
      effective_r_txt = options.r_text_in
    else:
      _GenerateRTxt(options, build.r_txt_path)
      effective_r_txt = build.r_txt_path

    if options.r_text_out:
      shutil.copyfile(effective_r_txt, options.r_text_out)

    if options.resource_zip_out:
      ignore_pattern = resource_utils.AAPT_IGNORE_PATTERN
      if options.strip_drawables:
        ignore_pattern += ':*drawable*'
      _ZipResources(options.resource_dirs, options.resource_zip_out,
                    ignore_pattern)
def main(args):
  """Script entry point: parse arguments, then regenerate outputs if stale."""
  args = build_utils.ExpandFileArgs(args)
  options = _ParseArgs(args)

  # Order of these must match order specified in GN so that the correct one
  # appears first in the depfile.
  output_paths = [
      options.resource_zip_out,
      options.resource_zip_out + '.info',
      options.r_text_out,
  ]

  input_paths = [options.res_sources_path]
  if options.r_text_in:
    input_paths.append(options.r_text_in)

  # Resource files aren't explicitly listed in GN. Listing them in the depfile
  # ensures the target will be marked stale when resource files are removed.
  depfile_deps = []
  resource_names = []
  keep_marker = os.path.join('empty', '.keep')
  for res_dir in options.resource_dirs:
    for res_file in build_utils.FindInDirectory(res_dir, '*'):
      # Don't list the empty .keep file in depfile. Since it doesn't end up
      # included in the .zip, it can lead to -w 'dupbuild=err' ninja errors
      # if ever moved.
      if res_file.endswith(keep_marker):
        continue
      input_paths.append(res_file)
      depfile_deps.append(res_file)
      resource_names.append(os.path.relpath(res_file, res_dir))

  # Resource filenames matter to the output, so add them to strings as well.
  # This matters if a file is renamed but not changed (http://crbug.com/597126).
  input_strings = sorted(resource_names) + [
      options.strip_drawables,
  ]

  # Since android_resources targets like *__all_dfm_resources depend on java
  # targets that they do not need (in reality it only needs the transitive
  # resource targets that those java targets depend on), md5_check is used to
  # prevent outputs from being re-written when real inputs have not changed.
  md5_check.CallAndWriteDepfileIfStale(lambda: _OnStaleMd5(options),
                                       options,
                                       input_paths=input_paths,
                                       input_strings=input_strings,
                                       output_paths=output_paths,
                                       depfile_deps=depfile_deps)
if __name__ == '__main__':
  main(sys.argv[1:])
| |
"""
pygments.formatters.latex
~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for LaTeX fancyvrb output.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from io import StringIO
from pygments.formatter import Formatter
from pygments.lexer import Lexer, do_insertions
from pygments.token import Token, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt
__all__ = ['LatexFormatter']
def escape_tex(text, commandprefix):
    """Escape *text* for inclusion in a fancyvrb Verbatim environment.

    Every LaTeX-special character is replaced by a ``\\<prefix>Zxx{}``
    macro call (the macros are defined by STYLE_TEMPLATE).
    """
    # Backslash and braces are swapped for control characters first, so the
    # replacement macros (which themselves contain \ { }) are never
    # re-escaped by the later substitutions.
    text = (text.replace('\\', '\x00')
                .replace('{', '\x01')
                .replace('}', '\x02'))
    for marker, name in [('\x00', 'Zbs'), ('\x01', 'Zob'), ('\x02', 'Zcb'),
                         ('^', 'Zca'), ('_', 'Zus'), ('&', 'Zam'),
                         ('<', 'Zlt'), ('>', 'Zgt'), ('#', 'Zsh'),
                         ('%', 'Zpc'), ('$', 'Zdl'), ('-', 'Zhy'),
                         ("'", 'Zsq'), ('"', 'Zdq'), ('~', 'Zti')]:
        text = text.replace(marker, '\\%s%s{}' % (commandprefix, name))
    return text
# %-format template for a complete standalone LaTeX document, used when the
# ``full`` option is set.  Keys: docclass, encoding, preamble, styledefs,
# title, code.
DOC_TEMPLATE = r'''
\documentclass{%(docclass)s}
\usepackage{fancyvrb}
\usepackage{color}
\usepackage[%(encoding)s]{inputenc}
%(preamble)s
%(styledefs)s
\begin{document}
\section*{%(title)s}
%(code)s
\end{document}
'''
## Small explanation of the mess below :)
#
# The previous version of the LaTeX formatter just assigned a command to
# each token type defined in the current style. That obviously is
# problematic if the highlighted code is produced for a different style
# than the style commands themselves.
#
# This version works much like the HTML formatter which assigns multiple
# CSS classes to each <span> tag, from the most specific to the least
# specific token type, thus falling back to the parent token type if one
# is not defined. Here, the classes are there too and use the same short
# forms given in token.STANDARD_TYPES.
#
# Highlighted code now only uses one custom command, which by default is
# \PY and selectable by the commandprefix option (and in addition the
# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
# backwards compatibility purposes).
#
# \PY has two arguments: the classes, separated by +, and the text to
# render in that style. The classes are resolved into the respective
# style commands by magic, which serves to ignore unknown classes.
#
# The magic macros are:
# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
# to render in \PY@do. Their definition determines the style.
# * \PY@reset resets \PY@it etc. to do nothing.
# * \PY@toks parses the list of classes, using magic inspired by the
# keyval package (but modified to use plusses instead of commas
# because fancyvrb redefines commas inside its environments).
# * \PY@tok processes one class, calling the \PY@tok@classname command
# if it exists.
# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
# for its class.
# * \PY resets the style, parses the classnames and then calls \PY@do.
#
# Tip: to read this code, print it out in substituted form using e.g.
# >>> print STYLE_TEMPLATE % {'cp': 'PY'}
STYLE_TEMPLATE = r'''
\makeatletter
\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
\let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
\let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
\%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
\%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}
%(styles)s
\def\%(cp)sZbs{\char`\\}
\def\%(cp)sZus{\char`\_}
\def\%(cp)sZob{\char`\{}
\def\%(cp)sZcb{\char`\}}
\def\%(cp)sZca{\char`\^}
\def\%(cp)sZam{\char`\&}
\def\%(cp)sZlt{\char`\<}
\def\%(cp)sZgt{\char`\>}
\def\%(cp)sZsh{\char`\#}
\def\%(cp)sZpc{\char`\%%}
\def\%(cp)sZdl{\char`\$}
\def\%(cp)sZhy{\char`\-}
\def\%(cp)sZsq{\char`\'}
\def\%(cp)sZdq{\char`\"}
\def\%(cp)sZti{\char`\~}
%% for compatibility with earlier versions
\def\%(cp)sZat{@}
\def\%(cp)sZlb{[}
\def\%(cp)sZrb{]}
\makeatother
'''
def _get_ttype_name(ttype):
    """Return the short command name for a token type.

    Token types without an entry in STANDARD_TYPES fall back to the
    nearest ancestor that has one, with the remaining subtype names
    appended verbatim.
    """
    short = STANDARD_TYPES.get(ttype)
    if short:
        return short
    suffix = ''
    while short is None:
        # Prepend this level's subtype name and climb one level up.
        suffix = ttype[-1] + suffix
        ttype = ttype.parent
        short = STANDARD_TYPES.get(ttype)
    return short + suffix
class LatexFormatter(Formatter):
    r"""
    Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
    standard packages.

    Without the `full` option, code is formatted as one ``Verbatim``
    environment, like this:

    .. sourcecode:: latex

        \begin{Verbatim}[commandchars=\\\{\}]
        \PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
            \PY{k}{pass}
        \end{Verbatim}

    The special command used here (``\PY``) and all the other macros it needs
    are output by the `get_style_defs` method.

    With the `full` option, a complete LaTeX document is output, including
    the command definitions in the preamble.

    The `get_style_defs()` method of a `LatexFormatter` returns a string
    containing ``\def`` commands defining the macros needed inside the
    ``Verbatim`` environments.

    Additional options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `full`
        Tells the formatter to output a "full" document, i.e. a complete
        self-contained document (default: ``False``).

    `title`
        If `full` is true, the title that should be used to caption the
        document (default: ``''``).

    `docclass`
        If the `full` option is enabled, this is the document class to use
        (default: ``'article'``).

    `preamble`
        If the `full` option is enabled, this can be further preamble commands,
        e.g. ``\usepackage`` (default: ``''``).

    `linenos`
        If set to ``True``, output line numbers (default: ``False``).

    `linenostart`
        The line number for the first line (default: ``1``).

    `linenostep`
        If set to a number n > 1, only every nth line number is printed.

    `verboptions`
        Additional options given to the Verbatim environment (see the *fancyvrb*
        docs for possible values) (default: ``''``).

    `commandprefix`
        The LaTeX commands used to produce colored output are constructed
        using this prefix and some letters (default: ``'PY'``).

        .. versionadded:: 0.7
        .. versionchanged:: 0.10
           The default is now ``'PY'`` instead of ``'C'``.

    `texcomments`
        If set to ``True``, enables LaTeX comment lines. That is, LaTex markup
        in comment tokens is not escaped so that LaTeX can render it (default:
        ``False``).

        .. versionadded:: 1.2

    `mathescape`
        If set to ``True``, enables LaTeX math mode escape in comments. That
        is, ``'$...$'`` inside a comment will trigger math mode (default:
        ``False``).

        .. versionadded:: 1.2

    `escapeinside`
        If set to a string of length 2, enables escaping to LaTeX. Text
        delimited by these 2 characters is read as LaTeX code and
        typeset accordingly. It has no effect in string literals. It has
        no effect in comments if `texcomments` or `mathescape` is
        set. (default: ``''``).

        .. versionadded:: 2.0

    `envname`
        Allows you to pick an alternative environment name replacing Verbatim.
        The alternate environment still has to support Verbatim's option syntax.
        (default: ``'Verbatim'``).

        .. versionadded:: 2.0
    """
    name = 'LaTeX'
    aliases = ['latex', 'tex']
    filenames = ['*.tex']

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.docclass = options.get('docclass', 'article')
        self.preamble = options.get('preamble', '')
        self.linenos = get_bool_opt(options, 'linenos', False)
        self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
        self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
        self.verboptions = options.get('verboptions', '')
        self.nobackground = get_bool_opt(options, 'nobackground', False)
        self.commandprefix = options.get('commandprefix', 'PY')
        self.texcomments = get_bool_opt(options, 'texcomments', False)
        self.mathescape = get_bool_opt(options, 'mathescape', False)
        self.escapeinside = options.get('escapeinside', '')
        # escapeinside is only honoured when it is exactly two characters.
        if len(self.escapeinside) == 2:
            self.left = self.escapeinside[0]
            self.right = self.escapeinside[1]
        else:
            self.escapeinside = ''
        self.envname = options.get('envname', 'Verbatim')

        self._create_stylesheet()

    def _create_stylesheet(self):
        """Precompute, per token type, the macro body implementing its style."""
        t2n = self.ttype2name = {Token: ''}
        c2d = self.cmd2def = {}
        cp = self.commandprefix

        def rgbcolor(col):
            # Convert a 6-hex-digit color to "r,g,b" floats; white if unset.
            if col:
                return ','.join(['%.2f' % (int(col[i] + col[i + 1], 16) / 255.0)
                                 for i in (0, 2, 4)])
            else:
                return '1,1,1'

        for ttype, ndef in self.style:
            name = _get_ttype_name(ttype)
            cmndef = ''
            if ndef['bold']:
                cmndef += r'\let\$$@bf=\textbf'
            if ndef['italic']:
                cmndef += r'\let\$$@it=\textit'
            if ndef['underline']:
                cmndef += r'\let\$$@ul=\underline'
            if ndef['roman']:
                cmndef += r'\let\$$@ff=\textrm'
            if ndef['sans']:
                cmndef += r'\let\$$@ff=\textsf'
            if ndef['mono']:
                # BUG FIX: monospace must map to \texttt (typewriter).  The
                # original used \textsf here, which is sans-serif and made
                # 'mono' styles render identically to 'sans' styles.
                cmndef += r'\let\$$@ff=\texttt'
            if ndef['color']:
                cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' %
                           rgbcolor(ndef['color']))
            if ndef['border']:
                cmndef += (r'\def\$$@bc##1{{\setlength{\fboxsep}{\string -\fboxrule}'
                           r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}}' %
                           (rgbcolor(ndef['border']),
                            rgbcolor(ndef['bgcolor'])))
            elif ndef['bgcolor']:
                cmndef += (r'\def\$$@bc##1{{\setlength{\fboxsep}{0pt}'
                           r'\colorbox[rgb]{%s}{\strut ##1}}}' %
                           rgbcolor(ndef['bgcolor']))
            if cmndef == '':
                continue
            cmndef = cmndef.replace('$$', cp)
            t2n[ttype] = name
            c2d[name] = cmndef

    def get_style_defs(self, arg=''):
        """
        Return the command sequences needed to define the commands
        used to format text in the verbatim environment. ``arg`` is ignored.
        """
        cp = self.commandprefix
        styles = []
        for name, definition in self.cmd2def.items():
            styles.append(r'\@namedef{%s@tok@%s}{%s}' % (cp, name, definition))
        return STYLE_TEMPLATE % {'cp': self.commandprefix,
                                 'styles': '\n'.join(styles)}

    def format_unencoded(self, tokensource, outfile):
        # TODO: add support for background colors
        t2n = self.ttype2name
        cp = self.commandprefix

        if self.full:
            realoutfile = outfile
            outfile = StringIO()

        outfile.write('\\begin{' + self.envname + '}[commandchars=\\\\\\{\\}')
        if self.linenos:
            start, step = self.linenostart, self.linenostep
            outfile.write(',numbers=left' +
                          (start and ',firstnumber=%d' % start or '') +
                          (step and ',stepnumber=%d' % step or ''))
        if self.mathescape or self.texcomments or self.escapeinside:
            outfile.write(',codes={\\catcode`\\$=3\\catcode`\\^=7'
                          '\\catcode`\\_=8\\relax}')
        if self.verboptions:
            outfile.write(',' + self.verboptions)
        outfile.write(']\n')

        for ttype, value in tokensource:
            if ttype in Token.Comment:
                if self.texcomments:
                    # Try to guess comment starting lexeme and escape it ...
                    start = value[0:1]
                    for i in range(1, len(value)):
                        if start[0] != value[i]:
                            break
                        start += value[i]

                    value = value[len(start):]
                    start = escape_tex(start, cp)

                    # ... but do not escape inside comment.
                    value = start + value
                elif self.mathescape:
                    # Only escape parts not inside a math environment.
                    parts = value.split('$')
                    in_math = False
                    for i, part in enumerate(parts):
                        if not in_math:
                            parts[i] = escape_tex(part, cp)
                        in_math = not in_math
                    value = '$'.join(parts)
                elif self.escapeinside:
                    # Pass text between the escape delimiters through raw.
                    text = value
                    value = ''
                    while text:
                        a, sep1, text = text.partition(self.left)
                        if sep1:
                            b, sep2, text = text.partition(self.right)
                            if sep2:
                                value += escape_tex(a, cp) + b
                            else:
                                value += escape_tex(a + sep1 + b, cp)
                        else:
                            value += escape_tex(a, cp)
                else:
                    value = escape_tex(value, cp)
            elif ttype not in Token.Escape:
                value = escape_tex(value, cp)
            # Collect style class names from most to least specific.
            styles = []
            while ttype is not Token:
                try:
                    styles.append(t2n[ttype])
                except KeyError:
                    # not in current style
                    styles.append(_get_ttype_name(ttype))
                ttype = ttype.parent
            styleval = '+'.join(reversed(styles))
            if styleval:
                # Emit one \PY{...}{...} call per line of the token value.
                spl = value.split('\n')
                for line in spl[:-1]:
                    if line:
                        outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
                    outfile.write('\n')
                if spl[-1]:
                    outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
            else:
                outfile.write(value)

        outfile.write('\\end{' + self.envname + '}\n')

        if self.full:
            encoding = self.encoding or 'utf8'
            # map known existings encodings from LaTeX distribution
            encoding = {
                'utf_8': 'utf8',
                'latin_1': 'latin1',
                'iso_8859_1': 'latin1',
            }.get(encoding.replace('-', '_'), encoding)
            realoutfile.write(DOC_TEMPLATE %
                              dict(docclass=self.docclass,
                                   preamble=self.preamble,
                                   title=self.title,
                                   encoding=encoding,
                                   styledefs=self.get_style_defs(),
                                   code=outfile.getvalue()))
class LatexEmbeddedLexer(Lexer):
    """
    This lexer takes one lexer as argument, the lexer for the language
    being formatted, and the left and right delimiters for escaped text.

    First everything is scanned using the language lexer to obtain
    strings and comments. All other consecutive tokens are merged and
    the resulting text is scanned for escaped segments, which are given
    the Token.Escape type. Finally text that is not escaped is scanned
    again with the language lexer.
    """
    def __init__(self, left, right, lang, **options):
        # left/right: the two escape delimiter characters;
        # lang: the lexer for the language being highlighted.
        self.left = left
        self.right = right
        self.lang = lang
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        # find and remove all the escape tokens (replace with an empty string)
        # this is very similar to DelegatingLexer.get_tokens_unprocessed.
        buffered = ''
        insertions = []
        insertion_buf = []
        for i, t, v in self._find_safe_escape_tokens(text):
            if t is None:
                # Plain text: flush any pending escapes at the current
                # buffered offset, then accumulate the text so the language
                # lexer can re-scan it as one contiguous string.
                if insertion_buf:
                    insertions.append((len(buffered), insertion_buf))
                    insertion_buf = []
                buffered += v
            else:
                # Escape token: remember it (with its original position) so
                # it can be re-inserted between the language lexer's tokens.
                insertion_buf.append((i, t, v))
        if insertion_buf:
            insertions.append((len(buffered), insertion_buf))
        return do_insertions(insertions,
                             self.lang.get_tokens_unprocessed(buffered))

    def _find_safe_escape_tokens(self, text):
        """ find escape tokens that are not in strings or comments """
        for i, t, v in self._filter_to(
            self.lang.get_tokens_unprocessed(text),
            lambda t: t in Token.Comment or t in Token.String
        ):
            if t is None:
                # Outside strings/comments: scan for escape delimiters.
                for i2, t2, v2 in self._find_escape_tokens(v):
                    yield i + i2, t2, v2
            else:
                # Inside a string or comment: pass through untouched.
                yield i, None, v

    def _filter_to(self, it, pred):
        """ Keep only the tokens that match `pred`, merge the others together """
        buf = ''
        idx = 0
        for i, t, v in it:
            if pred(t):
                if buf:
                    # Flush the accumulated non-matching run first.
                    yield idx, None, buf
                    buf = ''
                yield i, t, v
            else:
                if not buf:
                    # Remember where the merged run started.
                    idx = i
                buf += v
        if buf:
            yield idx, None, buf

    def _find_escape_tokens(self, text):
        """ Find escape tokens within text, give token=None otherwise """
        index = 0
        while text:
            a, sep1, text = text.partition(self.left)
            if a:
                yield index, None, a
                index += len(a)
            if sep1:
                b, sep2, text = text.partition(self.right)
                if sep2:
                    # Complete left...right pair: emit the inside as Escape.
                    yield index + len(sep1), Token.Escape, b
                    index += len(sep1) + len(b) + len(sep2)
                else:
                    # Unbalanced left delimiter: emit it as an Error token
                    # and keep scanning the remainder.
                    yield index, Token.Error, sep1
                    index += len(sep1)
                    text = b
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
executor_manager.py
ExecutorManager makes you configure a system to execute tasks.
Every task is not executed immediatly, but according to a politics defined in
the object configuration.
Es:
ExecutorManager.avvia(sendMail, separateEmails)
ExecutorManager.execute((u'subject1', u'body1'))
...
ExecutorManager.stop()
sendMail is a function with arguments (subject, mail).
separateEmails takes a list of tuples (subject, mail) and returns a new tuple
(subject, mail) merging the bodies
Luca Bacchi <bacchilu@gmail.com> - http://www.lucabacchi.it
"""
import threading
class Queue(object):
    """A minimal blocking queue whose consumer drains everything at once."""

    def __init__(self):
        self.cond = threading.Condition()
        self.items = []

    def put(self, item):
        """Append *item* and wake up a waiting consumer."""
        with self.cond:
            self.items.append(item)
            self.cond.notify()

    def getAll(self):
        """
        Blocking method. It returns a list instead of a single value.

        Blocks until at least one item is queued, then returns every
        pending item (in arrival order) as a single list, leaving the
        queue empty.
        """
        with self.cond:
            while not self.items:
                self.cond.wait()
            pending = self.items
            self.items = []
            return pending
class ExecutorJob(threading.Thread):
    """Worker thread that batches queued arguments before executing them.

    Arguments accumulate in a queue; each cycle drains the whole queue,
    merges the batch with merge_fn into a single argument tuple, invokes
    cb_fn once with it, then pauses up to *timeout* seconds before the
    next cycle.  A ``None`` item is the shutdown sentinel.
    """

    def __init__(self, cb_fn, merge_fn, timeout=60.0):
        threading.Thread.__init__(self)
        self.cb_fn = cb_fn
        self.merge_fn = merge_fn
        self.timeout = timeout
        self.q = Queue()
        self.e = threading.Event()

    def stop(self):
        """Request shutdown after draining, and wait for the thread."""
        self.q.put(None)   # sentinel: exit after processing what's queued
        self.e.set()       # cut the inter-batch pause short
        self.join()

    def putArg(self, arg):
        """
        Append the argument to be executed
        """
        self.q.put(arg)

    def getArguments(self):
        """
        Extract all pending arguments from the queue and returns them.

        Returns (args, shutdown_requested).
        """
        shutdown = False
        args = []
        for item in self.q.getAll():
            if item is None:
                shutdown = True
            else:
                args.append(item)
        return (args, shutdown)

    def run(self):
        """
        Get all messages, merges them and pass to the cb_fn function
        """
        while True:
            (args, shutdown) = self.getArguments()
            if args:
                self.cb_fn(*self.merge_fn(args))
            if shutdown:
                return
            self.e.wait(self.timeout)
from Queue import Queue as StlQueue
class ExecutorManager(threading.Thread):
    """Dispatcher routing arguments to one ExecutorJob thread per key.

    Only class-level state is used: configure and drive it through the
    static methods (avvia / execute / stop); do not instantiate directly.
    """

    t = None            # the singleton dispatcher thread
    q = StlQueue()      # inbound argument queue
    cb_fn = None        # callable invoked with the merged argument tuple
    merge_fn = None     # callable merging a batch of argument tuples
    key_fn = None       # optional callable mapping an argument to a job key
    job = {}            # key -> ExecutorJob

    @staticmethod
    def avvia(cb_fn, merge_fn, key_fn=None):
        """Start the dispatcher thread with the given callbacks."""
        # staticmethod() prevents the plain functions from being re-bound
        # as methods when later read back through the class attribute.
        ExecutorManager.cb_fn = staticmethod(cb_fn)
        ExecutorManager.merge_fn = staticmethod(merge_fn)
        if key_fn is not None:
            ExecutorManager.key_fn = staticmethod(key_fn)
        else:
            ExecutorManager.key_fn = None
        ExecutorManager.t = ExecutorManager()
        ExecutorManager.t.start()

    @staticmethod
    def stop():
        """Shut down the dispatcher, then every per-key job thread."""
        ExecutorManager.q.put(None)
        ExecutorManager.t.join()
        # .values() works on both py2 and py3 (the original itervalues()
        # is py2-only).
        for job in ExecutorManager.job.values():
            job.stop()
        ExecutorManager.job = {}

    @staticmethod
    def execute(arg):
        """
        Append the argument to be executed
        """
        ExecutorManager.q.put(arg)

    def sendJob(self, arg):
        """
        This message is sent to correct job thread, according to the subject
        """
        k = 'all'
        if ExecutorManager.key_fn is not None:
            k = ExecutorManager.key_fn(arg)
        job = ExecutorManager.job.get(k)
        if job is None:
            # BUG FIX: the original passed ExecutorJob(...) as the default
            # of dict.get(), constructing a throw-away thread object (with
            # its own queue and event) on EVERY call, even when the key
            # already had a live job.  Build it lazily instead.
            job = ExecutorJob(ExecutorManager.cb_fn,
                              ExecutorManager.merge_fn)
            ExecutorManager.job[k] = job
        # is_alive() replaces isAlive(), which was removed in Python 3.9.
        if not job.is_alive():
            job.start()
        job.putArg(arg)

    def run(self):
        """
        Get all messages, joins them and send.
        """
        while True:
            arg = ExecutorManager.q.get()
            if arg is None:
                return
            self.sendJob(arg)
class QueueMail(threading.Thread):
    """Facade that batches outgoing emails per subject.

    Messages queued through send() are grouped by subject and their
    bodies merged into a single message before being handed to the
    configured sender.
    """

    @staticmethod
    def sendMail(subject, body):
        """
        Mock of mail sender.

        BUG FIX: this was declared without @staticmethod; as a plain
        method, calling it through an instance would have bound *subject*
        to the instance, and on Python 2 ``QueueMail.sendMail(s, b)``
        failed as an unbound-method call.
        """
        return (subject, body)

    @staticmethod
    def separateEmails(argsList):
        """
        The email bodies are separate by lines

        Merges a list of (subject, body) tuples: keeps the first subject
        and joins the bodies with a dashed-ruler separator.
        """
        assert len(argsList) > 0
        subject = argsList[0][0]
        bodies = [body for (_, body) in argsList]
        msg = ('\n' + '-' * 30 + '\n').join(bodies)
        return (subject, msg)

    @staticmethod
    def getKey(arg):
        """
        Returns the subject of the argument
        """
        (subject, _) = arg
        return subject

    @staticmethod
    def avvia(sendMail):
        """Start a service thread listening on a queue for messages to send.

        *sendMail* must be a callable taking (subject, body).
        (Translated from the original Italian docstring.)
        """
        ExecutorManager.avvia(sendMail, QueueMail.separateEmails,
                              QueueMail.getKey)

    @staticmethod
    def stop():
        """Send a None message, interpreted as the shutdown signal.

        (Translated from the original Italian docstring.)
        """
        ExecutorManager.stop()

    @staticmethod
    def send(subject, body):
        """Queue one message for delivery.

        (Translated from the original Italian docstring.)
        """
        ExecutorManager.execute((subject, body))
| |
"""Docker runtime interface.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import json
import logging
import multiprocessing
import os
import socket
import time
import docker
from treadmill import appcfg
from treadmill import appevents
from treadmill import context
from treadmill import exc
from treadmill import logcontext as lc
from treadmill import presence
from treadmill import runtime
from treadmill import zkutils
from treadmill.appcfg import abort as app_abort
from treadmill.apptrace import events
from treadmill.runtime import runtime_base
if os.name == 'nt':
from treadmill.ad import gmsa
from treadmill.ad import credential_spec
_LOGGER = logging.getLogger(__name__)
def _create_environ(app):
"""Creates environ object.
"""
appenv = {envvar.name: envvar.value for envvar in app.environ}
appenv.update({
'TREADMILL_CPU': app.cpu,
'TREADMILL_DISK': app.disk,
'TREADMILL_MEMORY': app.memory,
'TREADMILL_CELL': app.cell,
'TREADMILL_APP': app.app,
'TREADMILL_INSTANCEID': app.task,
'TREADMILL_IDENTITY': app.identity,
'TREADMILL_IDENTITY_GROUP': app.identity_group,
'TREADMILL_PROID': app.proid,
'TREADMILL_ENV': app.environment,
'TREADMILL_HOSTNAME': socket.getfqdn().lower()
})
for endpoint in app.endpoints:
envname = 'TREADMILL_ENDPOINT_{0}'.format(endpoint.name.upper())
appenv[envname] = str(endpoint.real_port)
appenv['TREADMILL_EPHEMERAL_TCP_PORTS'] = ' '.join(
[str(port) for port in app.ephemeral_ports.tcp]
)
appenv['TREADMILL_EPHEMERAL_UDP_PORTS'] = ' '.join(
[str(port) for port in app.ephemeral_ports.udp]
)
return appenv
def _get_gmsa(tm_env, client, app, container_args):
    """Waits on GMSA details and adds the credential spec to the args.

    Polls (up to 60 attempts, one second apart) until this host is a member
    of the proid's GMSA host group, then generates a credential spec and
    wires it into ``container_args`` (``security_opt`` and ``hostname``).

    :param tm_env: Treadmill node environment.
    :param client: Docker client, used to generate the credential spec.
    :param app: Application manifest object.
    :param container_args: Container keyword args dict, updated in place.
    :raises exc.ContainerSetupError: if the host never appears in the
        proid's GMSA host group within the polling window.
    """
    check = gmsa.HostGroupCheck(tm_env)

    found = False
    for _attempt in range(60):
        found = check.host_in_proid_group(app.proid)
        if found:
            break
        # time.sleep takes seconds: poll once per second for ~60s total.
        # (The previous sleep(1000) stalled for 1000 seconds per attempt.)
        time.sleep(1)

    if not found:
        # The failure is about GMSA group membership, not the image.
        raise exc.ContainerSetupError(
            'Host not in GMSA group for proid {0}'.format(app.proid),
            app_abort.AbortedReason.GMSA
        )

    path = credential_spec.generate(app.proid, container_args['name'], client)

    container_args['security_opt'] = [
        'credentialspec={}'.format(path)
    ]
    container_args['hostname'] = app.proid
def _create_container(tm_env, conf, client, app):
    """Create docker container from given app.
    """
    # Map "<container-port>/<proto>" to the host port allocated for each
    # endpoint.
    ports = {
        '{0}/{1}'.format(endpoint.port, endpoint.proto): endpoint.real_port
        for endpoint in app.endpoints
    }

    # app.image contains a uri which starts with docker://
    image_name = app.image[9:]
    client.images.pull(image_name)

    name = appcfg.app_unique_name(app)

    # 1024 is max number of shares for docker
    cpu_shares = int((app.cpu / (multiprocessing.cpu_count() * 100.0)) * 1024)

    container_args = {
        'image': image_name,
        'name': name,
        'environment': _create_environ(app),
        'entrypoint': app.command,
        'command': app.args,
        'detach': True,
        'tty': True,
        'ports': ports,
        'network': conf.get('network', 'nat'),
        'cpu_shares': cpu_shares,
        'mem_limit': app.memory,
        'storage_opt': {'size': app.disk},
    }

    if os.name == 'nt':
        _get_gmsa(tm_env, client, app, container_args)

    try:
        # The container might exist already
        # TODO: start existing container with different ports
        client.containers.get(name).remove(force=True)
    except docker.errors.NotFound:
        pass

    return client.containers.create(**container_args)
def _check_aborted(container_dir):
"""check if app was aborted and why.
"""
aborted = None
aborted_file = os.path.join(container_dir, 'aborted')
try:
with io.open(aborted_file) as f:
aborted = json.load(f)
except IOError:
_LOGGER.debug('aborted file does not exist: %r', aborted_file)
return aborted
class DockerRuntime(runtime_base.RuntimeBase):
    """Docker Treadmill runtime.

    Runs a Treadmill app as a Docker container: creates the container from
    the app manifest, registers presence/endpoints in Zookeeper, posts
    lifecycle trace events, and archives logs when the container finishes.
    """

    name = 'docker'

    __slots__ = (
        '_client',
        '_config'
    )

    def __init__(self, tm_env, container_dir, param=None):
        super(DockerRuntime, self).__init__(tm_env, container_dir, param)
        # Docker client and runtime config are created lazily on first use
        # (see _get_client / _get_config).
        self._client = None
        self._config = None

    def _can_run(self, manifest):
        """Return True iff the manifest describes a docker-type app."""
        try:
            return appcfg.AppType(manifest['type']) is appcfg.AppType.DOCKER
        except ValueError:
            # Unknown app type - this runtime cannot handle it.
            return False

    def _get_config(self):
        """Gets the docker runtime configuration (lazily loaded).

        Reads ``docker.json`` from the node configs directory; falls back
        to an empty config when the file is missing.
        """
        if self._config is not None:
            return self._config

        docker_conf = os.path.join(self._tm_env.configs_dir, 'docker.json')

        try:
            with io.open(docker_conf) as f:
                self._config = json.load(f)
        except IOError:
            _LOGGER.error('docker config file does not exist: %r', docker_conf)
            self._config = {}

        return self._config

    def _get_client(self):
        """Gets the docker client (lazily created from the environment)."""
        if self._client is not None:
            return self._client

        self._client = docker.from_env(**self._param)
        return self._client

    def _run(self, manifest):
        """Run the app described by ``manifest`` in a docker container.

        Registers identity/presence in Zookeeper, creates and starts the
        container, posts the service-running trace event, then blocks until
        the container stops running.
        """
        context.GLOBAL.zk.conn.add_listener(zkutils.exit_on_lost)

        with lc.LogContext(_LOGGER, self._service.name,
                           lc.ContainerAdapter) as log:
            log.info('Running %r', self._service.directory)

            # Allocate host ports for the app endpoints before the app
            # state is saved and presence is registered.
            _sockets = runtime.allocate_network_ports(
                '0.0.0.0', manifest
            )

            app = runtime.save_app(manifest, self._service.data_dir)

            app_presence = presence.EndpointPresence(
                context.GLOBAL.zk.conn,
                manifest
            )
            app_presence.register_identity()
            app_presence.register_running()

            client = self._get_client()
            try:
                container = _create_container(
                    self._tm_env,
                    self._get_config(),
                    client,
                    app
                )
            except docker.errors.ImageNotFound:
                raise exc.ContainerSetupError(
                    'Image {0} was not found'.format(app.image),
                    app_abort.AbortedReason.IMAGE
                )

            container.start()
            # Refresh container attrs (status) after start.
            container.reload()

            _LOGGER.info('Container is running.')
            app_presence.register_endpoints()
            appevents.post(
                self._tm_env.app_events_dir,
                events.ServiceRunningTraceEvent(
                    instanceid=app.name,
                    uniqueid=app.uniqueid,
                    service='docker'
                )
            )

            # Block while the container is running, re-checking its status
            # every 10 seconds.
            while container.status == 'running':
                container.wait(timeout=10)
                container.reload()

    def _finish(self):
        """Post-run cleanup: remove the container and report final state."""
        app = runtime.load_app(self._service.data_dir, runtime.STATE_JSON)
        if app:
            client = self._get_client()
            container = state = None
            name = appcfg.app_unique_name(app)
            try:
                container = client.containers.get(name)
                # Capture the final docker 'State' (exit code, OOM flag)
                # before removing the container.
                state = container.attrs.get('State')
            except docker.errors.NotFound:
                pass

            if container is not None:
                try:
                    container.remove(force=True)
                except docker.errors.APIError:
                    # Best-effort removal; log and continue cleanup.
                    _LOGGER.error('Failed to remove %s', container.id)

            aborted = _check_aborted(self._service.data_dir)
            if aborted is not None:
                app_abort.report_aborted(self._tm_env, app.name,
                                         why=aborted.get('why'),
                                         payload=aborted.get('payload'))

            elif state is not None:
                if state.get('OOMKilled', False):
                    event = events.KilledTraceEvent(
                        instanceid=app.name,
                        is_oom=True,
                    )
                else:
                    event = events.FinishedTraceEvent(
                        instanceid=app.name,
                        # rc defaults to 256 (outside the normal 0-255
                        # range) when docker reports no exit code.
                        rc=state.get('ExitCode', 256),
                        signal=0,
                        payload=state
                    )

                appevents.post(self._tm_env.app_events_dir, event)

            if os.name == 'nt':
                # Windows only: drop the GMSA credential spec created for
                # this container.
                credential_spec.cleanup(name, client)

            try:
                runtime.archive_logs(self._tm_env, name,
                                     self._service.data_dir)
            except Exception:  # pylint: disable=W0703
                _LOGGER.exception('Unexpected exception storing local logs.')

    def kill(self):
        """Kill the running container for this app, if any."""
        app = runtime.load_app(self._service.data_dir, runtime.STATE_JSON)
        if not app:
            return

        name = appcfg.app_unique_name(app)
        try:
            client = self._get_client()
            container = client.containers.get(name)
            container.kill()
        except docker.errors.NotFound:
            # Container already gone - nothing to kill.
            pass
| |
"""Tests for Sentry integration."""
import logging
from unittest.mock import MagicMock, Mock, patch
import pytest
from homeassistant.components.sentry import get_channel, process_before_send
from homeassistant.components.sentry.const import (
CONF_DSN,
CONF_ENVIRONMENT,
CONF_EVENT_CUSTOM_COMPONENTS,
CONF_EVENT_HANDLED,
CONF_EVENT_THIRD_PARTY_PACKAGES,
CONF_TRACING,
CONF_TRACING_SAMPLE_RATE,
DOMAIN,
)
from homeassistant.const import __version__ as current_version
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry
async def test_setup_entry(hass: HomeAssistant) -> None:
    """Test integration setup from entry."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_DSN: "http://public@example.com/1", CONF_ENVIRONMENT: "production"},
    )
    entry.add_to_hass(hass)

    with patch(
        "homeassistant.components.sentry.AioHttpIntegration"
    ) as sentry_aiohttp_mock, patch(
        "homeassistant.components.sentry.SqlalchemyIntegration"
    ) as sentry_sqlalchemy_mock, patch(
        "homeassistant.components.sentry.LoggingIntegration"
    ) as sentry_logging_mock, patch(
        "homeassistant.components.sentry.sentry_sdk"
    ) as sentry_mock:
        assert await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()

    # Test CONF_ENVIRONMENT is migrated to entry options
    assert CONF_ENVIRONMENT not in entry.data
    assert CONF_ENVIRONMENT in entry.options
    assert entry.options[CONF_ENVIRONMENT] == "production"

    assert sentry_logging_mock.call_count == 1
    # NOTE: `assert mock.called_once_with(...)` is always truthy because it
    # just creates a child mock; use the Mock assertion method instead.
    sentry_logging_mock.assert_called_once_with(
        level=logging.WARNING, event_level=logging.WARNING
    )

    assert sentry_aiohttp_mock.call_count == 1
    assert sentry_sqlalchemy_mock.call_count == 1

    assert sentry_mock.init.call_count == 1
    call_args = sentry_mock.init.call_args[1]
    assert set(call_args) == {
        "dsn",
        "environment",
        "integrations",
        "release",
        "before_send",
    }
    assert call_args["dsn"] == "http://public@example.com/1"
    assert call_args["environment"] == "production"
    assert call_args["integrations"] == [
        sentry_logging_mock.return_value,
        sentry_aiohttp_mock.return_value,
        sentry_sqlalchemy_mock.return_value,
    ]
    assert call_args["release"] == current_version
    assert call_args["before_send"]
async def test_setup_entry_with_tracing(hass: HomeAssistant) -> None:
    """Test integration setup from entry with tracing enabled."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_DSN: "http://public@example.com/1"},
        options={CONF_TRACING: True, CONF_TRACING_SAMPLE_RATE: 0.5},
    )
    entry.add_to_hass(hass)

    # Patch out the sentry SDK and its integrations; only the kwargs passed
    # to sentry_sdk.init are inspected here.
    with patch("homeassistant.components.sentry.AioHttpIntegration"), patch(
        "homeassistant.components.sentry.SqlalchemyIntegration"
    ), patch("homeassistant.components.sentry.LoggingIntegration"), patch(
        "homeassistant.components.sentry.sentry_sdk"
    ) as sentry_mock:
        assert await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()

    call_args = sentry_mock.init.call_args[1]
    # With tracing enabled, traces_sample_rate must be forwarded to
    # sentry_sdk.init in addition to the standard arguments.
    assert set(call_args) == {
        "dsn",
        "environment",
        "integrations",
        "release",
        "before_send",
        "traces_sample_rate",
    }
    assert call_args["traces_sample_rate"] == 0.5
@pytest.mark.parametrize(
    ("version", "channel"),
    [
        ("0.115.0.dev20200815", "nightly"),
        ("0.115.0", "stable"),
        ("0.115.0b4", "beta"),
        ("0.115.0dev0", "dev"),
    ],
)
async def test_get_channel(version, channel) -> None:
    """Test if channel detection works from Home Assistant version number."""
    # Each release-string flavour maps to its distribution channel.
    assert channel == get_channel(version)
async def test_process_before_send(hass: HomeAssistant):
    """Test regular use of the Sentry process before sending function."""
    # Loaded integrations; the sub-platform ("puppies.light") and the
    # default "auth" component should not show up in the result.
    for component in ("puppies", "a_integration", "puppies.light", "auth"):
        hass.config.components.add(component)

    processed = process_before_send(
        hass,
        options={},
        channel="test",
        huuid="12345",
        system_info={"installation_type": "pytest"},
        custom_components=["ironing_robot", "fridge_opener"],
        event={},
        hint={},
    )

    assert processed
    assert processed["tags"]
    assert processed["contexts"]

    ha_context = processed["contexts"]["Home Assistant"]
    assert ha_context["channel"] == "test"
    assert ha_context["custom_components"] == "fridge_opener\nironing_robot"
    assert ha_context["integrations"] == "a_integration\npuppies"

    tags = processed["tags"]
    assert tags["channel"] == "test"
    assert tags["uuid"] == "12345"
    assert tags["installation_type"] == "pytest"

    assert processed["user"]["id"] == "12345"
async def test_event_with_platform_context(hass: HomeAssistant):
    """Test extraction of platform context information during Sentry events."""
    platform_mock = Mock()

    # First scenario: a built-in integration platform.
    platform_mock.get().platform_name = "hue"
    platform_mock.get().domain = "light"

    with patch(
        "homeassistant.components.sentry.entity_platform.current_platform",
        new=platform_mock,
    ):
        processed = process_before_send(
            hass,
            options={},
            channel="test",
            huuid="12345",
            system_info={"installation_type": "pytest"},
            custom_components=["ironing_robot"],
            event={},
            hint={},
        )

    assert processed
    assert processed["tags"]["integration"] == "hue"
    assert processed["tags"]["platform"] == "light"
    assert processed["tags"]["custom_component"] == "no"

    # Second scenario: a custom-component platform (custom component events
    # enabled via options).
    platform_mock.get().platform_name = "ironing_robot"
    platform_mock.get().domain = "switch"

    with patch(
        "homeassistant.components.sentry.entity_platform.current_platform",
        new=platform_mock,
    ):
        processed = process_before_send(
            hass,
            options={CONF_EVENT_CUSTOM_COMPONENTS: True},
            channel="test",
            huuid="12345",
            system_info={"installation_type": "pytest"},
            custom_components=["ironing_robot"],
            event={},
            hint={},
        )

    assert processed
    assert processed["tags"]["integration"] == "ironing_robot"
    assert processed["tags"]["platform"] == "switch"
    assert processed["tags"]["custom_component"] == "yes"
@pytest.mark.parametrize(
    ("logger", "tags"),
    [
        ("adguard", {"package": "adguard"}),
        (
            "homeassistant.components.hue.coordinator",
            {"integration": "hue", "custom_component": "no"},
        ),
        (
            "homeassistant.components.hue.light",
            {"integration": "hue", "platform": "light", "custom_component": "no"},
        ),
        (
            "homeassistant.components.ironing_robot.switch",
            {
                "integration": "ironing_robot",
                "platform": "switch",
                "custom_component": "yes",
            },
        ),
        (
            "homeassistant.components.ironing_robot",
            {"integration": "ironing_robot", "custom_component": "yes"},
        ),
        ("homeassistant.helpers.network", {"helpers": "network"}),
        ("tuyapi.test", {"package": "tuyapi"}),
    ],
)
async def test_logger_event_extraction(hass: HomeAssistant, logger, tags):
    """Test extraction of information from Sentry logger events."""
    processed = process_before_send(
        hass,
        options={
            CONF_EVENT_CUSTOM_COMPONENTS: True,
            CONF_EVENT_THIRD_PARTY_PACKAGES: True,
        },
        channel="test",
        huuid="12345",
        system_info={"installation_type": "pytest"},
        custom_components=["ironing_robot"],
        event={"logger": logger},
        hint={},
    )

    assert processed
    # The base tags plus whatever the logger name implies.
    expected_tags = {
        "channel": "test",
        "uuid": "12345",
        "installation_type": "pytest",
        **tags,
    }
    assert processed["tags"] == expected_tags
@pytest.mark.parametrize(
    ("logger", "options", "event"),
    [
        ("adguard", {CONF_EVENT_THIRD_PARTY_PACKAGES: True}, True),
        ("adguard", {CONF_EVENT_THIRD_PARTY_PACKAGES: False}, False),
        (
            "homeassistant.components.ironing_robot.switch",
            {CONF_EVENT_CUSTOM_COMPONENTS: True},
            True,
        ),
        (
            "homeassistant.components.ironing_robot.switch",
            {CONF_EVENT_CUSTOM_COMPONENTS: False},
            False,
        ),
    ],
)
async def test_filter_log_events(hass: HomeAssistant, logger, options, event):
    """Test filtering of events based on configuration options."""
    processed = process_before_send(
        hass,
        options=options,
        channel="test",
        huuid="12345",
        system_info={"installation_type": "pytest"},
        custom_components=["ironing_robot"],
        event={"logger": logger},
        hint={},
    )

    # A dropped event is signalled by returning None.
    if event:
        assert processed
    else:
        assert processed is None
@pytest.mark.parametrize(
    "handled,options,event",
    [
        ("yes", {CONF_EVENT_HANDLED: True}, True),
        ("yes", {CONF_EVENT_HANDLED: False}, False),
        ("no", {CONF_EVENT_HANDLED: False}, True),
        ("no", {CONF_EVENT_HANDLED: True}, True),
    ],
)
async def test_filter_handled_events(hass: HomeAssistant, handled, options, event):
    """Tests filtering of handled events based on configuration options."""
    event_mock = MagicMock()
    # Magic methods on a MagicMock must be configured with callables (or
    # mocks); the previous `__iter__ = ["tags"]` assigned a plain list and
    # never took effect.
    event_mock.__iter__ = lambda _: iter(["tags"])
    event_mock.__contains__ = lambda _, val: val == "tags"
    event_mock.tags = {"handled": handled}

    result = process_before_send(
        hass,
        options=options,
        channel="test",
        huuid="12345",
        system_info={"installation_type": "pytest"},
        custom_components=[],
        event=event_mock,
        hint={},
    )

    # Handled events are only kept when CONF_EVENT_HANDLED is enabled;
    # unhandled events are always kept.
    if event:
        assert result
    else:
        assert result is None
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any, Dict, TYPE_CHECKING
from superset.dashboards.filter_sets.consts import (
DASHBOARD_OWNER_TYPE,
DESCRIPTION_FIELD,
JSON_METADATA_FIELD,
NAME_FIELD,
OWNER_ID_FIELD,
OWNER_TYPE_FIELD,
USER_OWNER_TYPE,
)
from tests.integration_tests.base_tests import login
from tests.integration_tests.dashboards.filter_sets.consts import (
ADMIN_USERNAME_FOR_TEST,
DASHBOARD_OWNER_USERNAME,
FILTER_SET_OWNER_USERNAME,
)
from tests.integration_tests.dashboards.filter_sets.utils import (
call_create_filter_set,
get_filter_set_by_dashboard_id,
get_filter_set_by_name,
)
if TYPE_CHECKING:
from flask.testing import FlaskClient
def assert_filterset_was_not_created(filter_set_data: Dict[str, Any]) -> None:
    """Assert that no filter set with the given name was persisted."""
    name = str(filter_set_data["name"])
    assert get_filter_set_by_name(name) is None
def assert_filterset_was_created(filter_set_data: Dict[str, Any]) -> None:
    """Assert that a filter set with the given name was persisted."""
    assert get_filter_set_by_name(filter_set_data["name"]) is not None
class TestCreateFilterSetsApi:
    """Integration tests for the filter-set create API.

    Each test arranges a payload (and a logged-in caller), calls the create
    endpoint for a dashboard, and asserts on both the HTTP status code and
    whether the filter set was actually persisted.
    """

    def test_with_extra_field__400(
        self,
        dashboard_id: int,
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, "admin")
        valid_filter_set_data_for_create["extra"] = "val"

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 400
        assert response.json["message"]["extra"][0] == "Unknown field."
        assert_filterset_was_not_created(valid_filter_set_data_for_create)

    def test_with_id_field__400(
        self,
        dashboard_id: int,
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, "admin")
        valid_filter_set_data_for_create["id"] = 1

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 400
        assert response.json["message"]["id"][0] == "Unknown field."
        assert_filterset_was_not_created(valid_filter_set_data_for_create)

    def test_with_dashboard_not_exists__404(
        self,
        not_exists_dashboard: int,
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # act
        login(client, "admin")
        response = call_create_filter_set(
            client, not_exists_dashboard, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 404
        assert_filterset_was_not_created(valid_filter_set_data_for_create)

    def test_without_name__400(
        self,
        dashboard_id: int,
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, "admin")
        valid_filter_set_data_for_create.pop(NAME_FIELD, None)

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 400
        assert get_filter_set_by_dashboard_id(dashboard_id) == []

    def test_with_none_name__400(
        self,
        dashboard_id: int,
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, "admin")
        valid_filter_set_data_for_create[NAME_FIELD] = None

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 400
        assert_filterset_was_not_created(valid_filter_set_data_for_create)

    def test_with_int_as_name__400(
        self,
        dashboard_id: int,
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, "admin")
        valid_filter_set_data_for_create[NAME_FIELD] = 4

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 400
        assert_filterset_was_not_created(valid_filter_set_data_for_create)

    def test_without_description__201(
        self,
        dashboard_id: int,
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, "admin")
        valid_filter_set_data_for_create.pop(DESCRIPTION_FIELD, None)

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 201
        assert_filterset_was_created(valid_filter_set_data_for_create)

    def test_with_none_description__201(
        self,
        dashboard_id: int,
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, "admin")
        valid_filter_set_data_for_create[DESCRIPTION_FIELD] = None

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 201
        assert_filterset_was_created(valid_filter_set_data_for_create)

    def test_with_int_as_description__400(
        self,
        dashboard_id: int,
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, "admin")
        valid_filter_set_data_for_create[DESCRIPTION_FIELD] = 1

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 400
        assert_filterset_was_not_created(valid_filter_set_data_for_create)

    def test_without_json_metadata__400(
        self,
        dashboard_id: int,
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, "admin")
        valid_filter_set_data_for_create.pop(JSON_METADATA_FIELD, None)

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 400
        assert_filterset_was_not_created(valid_filter_set_data_for_create)

    def test_with_invalid_json_metadata__400(
        self,
        dashboard_id: int,
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, "admin")
        # Invalidate the json_metadata field (the test previously mutated
        # DESCRIPTION_FIELD, leaving json_metadata validation untested).
        valid_filter_set_data_for_create[JSON_METADATA_FIELD] = {}

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 400
        assert_filterset_was_not_created(valid_filter_set_data_for_create)

    def test_without_owner_type__400(
        self,
        dashboard_id: int,
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, "admin")
        valid_filter_set_data_for_create.pop(OWNER_TYPE_FIELD, None)

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 400
        assert_filterset_was_not_created(valid_filter_set_data_for_create)

    def test_with_invalid_owner_type__400(
        self,
        dashboard_id: int,
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, "admin")
        valid_filter_set_data_for_create[OWNER_TYPE_FIELD] = "OTHER_TYPE"

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 400
        assert_filterset_was_not_created(valid_filter_set_data_for_create)

    def test_without_owner_id_when_owner_type_is_user__400(
        self,
        dashboard_id: int,
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, "admin")
        valid_filter_set_data_for_create[OWNER_TYPE_FIELD] = USER_OWNER_TYPE
        valid_filter_set_data_for_create.pop(OWNER_ID_FIELD, None)

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 400
        assert_filterset_was_not_created(valid_filter_set_data_for_create)

    def test_without_owner_id_when_owner_type_is_dashboard__201(
        self,
        dashboard_id: int,
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, "admin")
        valid_filter_set_data_for_create[OWNER_TYPE_FIELD] = DASHBOARD_OWNER_TYPE
        valid_filter_set_data_for_create.pop(OWNER_ID_FIELD, None)

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 201
        assert_filterset_was_created(valid_filter_set_data_for_create)

    def test_with_not_exists_owner__400(
        self,
        dashboard_id: int,
        valid_filter_set_data_for_create: Dict[str, Any],
        not_exists_user_id: int,
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, "admin")
        valid_filter_set_data_for_create[OWNER_TYPE_FIELD] = USER_OWNER_TYPE
        valid_filter_set_data_for_create[OWNER_ID_FIELD] = not_exists_user_id

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 400
        assert_filterset_was_not_created(valid_filter_set_data_for_create)

    def test_when_caller_is_admin_and_owner_is_admin__201(
        self,
        dashboard_id: int,
        test_users: Dict[str, int],
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, "admin")
        valid_filter_set_data_for_create[OWNER_TYPE_FIELD] = USER_OWNER_TYPE
        valid_filter_set_data_for_create[OWNER_ID_FIELD] = test_users[
            ADMIN_USERNAME_FOR_TEST
        ]

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 201
        assert_filterset_was_created(valid_filter_set_data_for_create)

    def test_when_caller_is_admin_and_owner_is_dashboard_owner__201(
        self,
        dashboard_id: int,
        test_users: Dict[str, int],
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, "admin")
        valid_filter_set_data_for_create[OWNER_TYPE_FIELD] = USER_OWNER_TYPE
        valid_filter_set_data_for_create[OWNER_ID_FIELD] = test_users[
            DASHBOARD_OWNER_USERNAME
        ]

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 201
        assert_filterset_was_created(valid_filter_set_data_for_create)

    def test_when_caller_is_admin_and_owner_is_regular_user__201(
        self,
        dashboard_id: int,
        test_users: Dict[str, int],
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, "admin")
        valid_filter_set_data_for_create[OWNER_TYPE_FIELD] = USER_OWNER_TYPE
        valid_filter_set_data_for_create[OWNER_ID_FIELD] = test_users[
            FILTER_SET_OWNER_USERNAME
        ]

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 201
        assert_filterset_was_created(valid_filter_set_data_for_create)

    def test_when_caller_is_admin_and_owner_type_is_dashboard__201(
        self,
        dashboard_id: int,
        test_users: Dict[str, int],
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, "admin")
        valid_filter_set_data_for_create[OWNER_TYPE_FIELD] = DASHBOARD_OWNER_TYPE
        valid_filter_set_data_for_create[OWNER_ID_FIELD] = dashboard_id

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 201
        assert_filterset_was_created(valid_filter_set_data_for_create)

    def test_when_caller_is_dashboard_owner_and_owner_is_admin__201(
        self,
        dashboard_id: int,
        test_users: Dict[str, int],
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, DASHBOARD_OWNER_USERNAME)
        valid_filter_set_data_for_create[OWNER_TYPE_FIELD] = USER_OWNER_TYPE
        valid_filter_set_data_for_create[OWNER_ID_FIELD] = test_users[
            ADMIN_USERNAME_FOR_TEST
        ]

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 201
        assert_filterset_was_created(valid_filter_set_data_for_create)

    def test_when_caller_is_dashboard_owner_and_owner_is_dashboard_owner__201(
        self,
        dashboard_id: int,
        test_users: Dict[str, int],
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, DASHBOARD_OWNER_USERNAME)
        valid_filter_set_data_for_create[OWNER_TYPE_FIELD] = USER_OWNER_TYPE
        valid_filter_set_data_for_create[OWNER_ID_FIELD] = test_users[
            DASHBOARD_OWNER_USERNAME
        ]

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 201
        assert_filterset_was_created(valid_filter_set_data_for_create)

    def test_when_caller_is_dashboard_owner_and_owner_is_regular_user__201(
        self,
        dashboard_id: int,
        test_users: Dict[str, int],
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, DASHBOARD_OWNER_USERNAME)
        valid_filter_set_data_for_create[OWNER_TYPE_FIELD] = USER_OWNER_TYPE
        valid_filter_set_data_for_create[OWNER_ID_FIELD] = test_users[
            FILTER_SET_OWNER_USERNAME
        ]

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 201
        assert_filterset_was_created(valid_filter_set_data_for_create)

    def test_when_caller_is_dashboard_owner_and_owner_type_is_dashboard__201(
        self,
        dashboard_id: int,
        test_users: Dict[str, int],
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, DASHBOARD_OWNER_USERNAME)
        valid_filter_set_data_for_create[OWNER_TYPE_FIELD] = DASHBOARD_OWNER_TYPE
        valid_filter_set_data_for_create[OWNER_ID_FIELD] = dashboard_id

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 201
        assert_filterset_was_created(valid_filter_set_data_for_create)

    def test_when_caller_is_regular_user_and_owner_is_admin__201(
        self,
        dashboard_id: int,
        test_users: Dict[str, int],
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, FILTER_SET_OWNER_USERNAME)
        valid_filter_set_data_for_create[OWNER_TYPE_FIELD] = USER_OWNER_TYPE
        valid_filter_set_data_for_create[OWNER_ID_FIELD] = test_users[
            ADMIN_USERNAME_FOR_TEST
        ]

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 201
        assert_filterset_was_created(valid_filter_set_data_for_create)

    def test_when_caller_is_regular_user_and_owner_is_dashboard_owner__201(
        self,
        dashboard_id: int,
        test_users: Dict[str, int],
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, FILTER_SET_OWNER_USERNAME)
        valid_filter_set_data_for_create[OWNER_TYPE_FIELD] = USER_OWNER_TYPE
        valid_filter_set_data_for_create[OWNER_ID_FIELD] = test_users[
            DASHBOARD_OWNER_USERNAME
        ]

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 201
        assert_filterset_was_created(valid_filter_set_data_for_create)

    def test_when_caller_is_regular_user_and_owner_is_regular_user__201(
        self,
        dashboard_id: int,
        test_users: Dict[str, int],
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, FILTER_SET_OWNER_USERNAME)
        valid_filter_set_data_for_create[OWNER_TYPE_FIELD] = USER_OWNER_TYPE
        valid_filter_set_data_for_create[OWNER_ID_FIELD] = test_users[
            FILTER_SET_OWNER_USERNAME
        ]

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 201
        assert_filterset_was_created(valid_filter_set_data_for_create)

    def test_when_caller_is_regular_user_and_owner_type_is_dashboard__403(
        self,
        dashboard_id: int,
        test_users: Dict[str, int],
        valid_filter_set_data_for_create: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, FILTER_SET_OWNER_USERNAME)
        valid_filter_set_data_for_create[OWNER_TYPE_FIELD] = DASHBOARD_OWNER_TYPE
        valid_filter_set_data_for_create[OWNER_ID_FIELD] = dashboard_id

        # act
        response = call_create_filter_set(
            client, dashboard_id, valid_filter_set_data_for_create
        )

        # assert
        assert response.status_code == 403
        assert_filterset_was_not_created(valid_filter_set_data_for_create)
| |
import json
from django import forms
from django.core.exceptions import ImproperlyConfigured
from django.forms import widgets
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from wagtail.admin.staticfiles import versioned_static
from wagtail.core.models import Page
from wagtail.core.telepath import register
from wagtail.core.utils import resolve_model_string
from wagtail.core.widget_adapters import WidgetAdapter
from wagtail.utils.widgets import WidgetWithScript
class AdminChooser(WidgetWithScript, widgets.Input):
    """Base widget for choosing a related object via a chooser interface.

    Renders as a hidden input, but is treated as a visible form field.
    """

    input_type = "hidden"
    choose_one_text = _("Choose an item")
    choose_another_text = _("Choose another item")
    clear_choice_text = _("Clear choice")
    link_to_chosen_text = _("Edit this item")
    show_edit_link = True
    show_clear_link = True

    # when looping over form fields, this one should appear in visible_fields, not hidden_fields
    # despite the underlying input being type="hidden"
    is_hidden = False

    def get_instance(self, model_class, value):
        """Resolve ``value`` (a pk) to a model instance, or None.

        DEPRECATED - subclasses should override
        WidgetWithScript.get_value_data instead.
        """
        if value is None:
            return None
        try:
            return model_class.objects.get(pk=value)
        except model_class.DoesNotExist:
            return None

    def get_instance_and_id(self, model_class, value):
        """Resolve ``value`` (a pk or instance) to an (instance, id) pair.

        DEPRECATED - subclasses should override
        WidgetWithScript.get_value_data instead.
        """
        if value is None:
            return (None, None)
        if isinstance(value, model_class):
            return (value, value.pk)
        try:
            return (model_class.objects.get(pk=value), value)
        except model_class.DoesNotExist:
            return (None, None)

    def value_from_datadict(self, data, files, name):
        # treat the empty string as None
        value = super().value_from_datadict(data, files, name)
        return None if value == "" else value

    def __init__(self, **kwargs):
        # allow the presentation attributes to be overridden per-instance
        for attr in (
            "choose_one_text",
            "choose_another_text",
            "clear_choice_text",
            "link_to_chosen_text",
            "show_edit_link",
            "show_clear_link",
        ):
            if attr in kwargs:
                setattr(self, attr, kwargs.pop(attr))
        super().__init__(**kwargs)
class AdminPageChooser(AdminChooser):
    """Chooser widget for selecting a Wagtail Page (optionally restricted to
    specific page types)."""

    choose_one_text = _("Choose a page")
    choose_another_text = _("Choose another page")
    link_to_chosen_text = _("Edit this page")

    def __init__(
        self, target_models=None, can_choose_root=False, user_perms=None, **kwargs
    ):
        """
        :param target_models: a page model (or model string, or iterable of
            either) that selectable pages must be instances of; defaults to Page
        :param can_choose_root: whether the tree root may be selected
        :param user_perms: permission filter name passed through to the client
        """
        super().__init__(**kwargs)

        if target_models:
            # Accept a single model / model string as well as any collection.
            if not isinstance(target_models, (set, list, tuple)):
                target_models = [target_models]
            resolved = []
            for spec in target_models:
                try:
                    resolved.append(resolve_model_string(spec))
                except (ValueError, LookupError):
                    raise ImproperlyConfigured(
                        "Could not resolve %r into a model. "
                        "Model names should be in the form app_label.model_name"
                        % (spec,)
                    )
        else:
            resolved = [Page]

        # When restricted to exactly one specific page type, surface it in the
        # "choose" prompt.
        if len(resolved) == 1 and resolved[0] is not Page:
            self.choose_one_text += " (" + resolved[0]._meta.verbose_name.title() + ")"

        self.user_perms = user_perms
        self.target_models = resolved
        self.can_choose_root = bool(can_choose_root)

    def _get_lowest_common_page_class(self):
        """
        Return a Page class that is an ancestor for all Page classes in
        ``target_models``, and is also a concrete Page class itself.
        """
        return self.target_models[0] if len(self.target_models) == 1 else Page

    @property
    def model_names(self):
        # "app_label.model_name" strings for every allowed page type.
        return [
            "%s.%s" % (model._meta.app_label, model._meta.model_name)
            for model in self.target_models
        ]

    @property
    def client_options(self):
        # a JSON-serializable representation of the configuration options needed for the
        # client-side behaviour of this widget
        return {
            "model_names": self.model_names,
            "can_choose_root": self.can_choose_root,
            "user_perms": self.user_perms,
        }

    def get_value_data(self, value):
        """Expand *value* (None, a Page, or a page id) into the dict of data
        needed to render the widget; returns None for missing pages."""
        if value is None:
            return None
        if isinstance(value, Page):
            page = value.specific
        else:
            # assume we were given a page ID
            model_class = self._get_lowest_common_page_class()
            try:
                page = model_class.objects.get(pk=value)
            except model_class.DoesNotExist:
                return None
            page = page.specific

        parent_page = page.get_parent()
        return {
            "id": page.pk,
            "display_title": page.get_admin_display_title(),
            "parent_id": parent_page.pk if parent_page else None,
            "edit_url": reverse("wagtailadmin_pages:edit", args=[page.pk]),
        }

    def render_html(self, name, value_data, attrs):
        value_data = value_data or {}
        original_field_html = super().render_html(name, value_data.get("id"), attrs)
        context = {
            "widget": self,
            "original_field_html": original_field_html,
            "attrs": attrs,
            # chooser.html only uses this to distinguish blank values
            "value": bool(value_data),
            "display_title": value_data.get("display_title", ""),
            "edit_url": value_data.get("edit_url", ""),
        }
        return render_to_string("wagtailadmin/widgets/page_chooser.html", context)

    def render_js_init(self, id_, name, value_data):
        value_data = value_data or {}
        return "createPageChooser({id}, {parent}, {options});".format(
            id=json.dumps(id_),
            parent=json.dumps(value_data.get("parent_id")),
            options=json.dumps(self.client_options),
        )

    @property
    def media(self):
        return forms.Media(
            js=[
                versioned_static("wagtailadmin/js/page-chooser-modal.js"),
                versioned_static("wagtailadmin/js/page-chooser.js"),
            ]
        )
class PageChooserAdapter(WidgetAdapter):
    """Telepath adapter exposing AdminPageChooser to client-side JS."""

    js_constructor = "wagtail.widgets.PageChooser"

    def js_args(self, widget):
        # [placeholder html, label id, client options] consumed by the JS
        # constructor; __NAME__/__ID__ are substituted on the client.
        html = widget.render_html("__NAME__", None, attrs={"id": "__ID__"})
        return [html, widget.id_for_label("__ID__"), widget.client_options]


register(PageChooserAdapter(), AdminPageChooser)
| |
#!/usr/bin/env python
# This file was taken from Riverbank's examples,
# which was an adaptation of the original C++ Qt's examples.
from PySide import QtCore, QtGui
import animatedtiles_rc
# PyQt doesn't support deriving from more than one wrapped class so we use
# composition and delegate the property.
class Pixmap(QtCore.QObject):
    """QObject wrapper around a QGraphicsPixmapItem.

    PyQt/PySide cannot derive from more than one wrapped class, so the pixmap
    item is held by composition and its position is re-exposed as a Qt
    property, making it animatable via QPropertyAnimation.
    """

    def __init__(self, pix):
        super(Pixmap, self).__init__()
        self.pixmap_item = QtGui.QGraphicsPixmapItem(pix)
        self.pixmap_item.setCacheMode(QtGui.QGraphicsItem.DeviceCoordinateCache)

    def get_pos(self):
        return self.pixmap_item.pos()

    def set_pos(self, pos):
        self.pixmap_item.setPos(pos)

    # Delegated 'pos' property driven by the state machine's animations.
    pos = QtCore.Property(QtCore.QPointF, get_pos, set_pos)
class Button(QtGui.QGraphicsWidget):
    """Round, gradient-shaded button drawn directly on the graphics scene."""

    pressed = QtCore.Signal()

    def __init__(self, pixmap, parent=None):
        super(Button, self).__init__(parent)
        self._pix = pixmap
        self.setAcceptHoverEvents(True)
        self.setCacheMode(QtGui.QGraphicsItem.DeviceCoordinateCache)

    def boundingRect(self):
        return QtCore.QRectF(-65, -65, 130, 130)

    def shape(self):
        # Hit-testing uses the circular outline, not the square bounding rect.
        outline = QtGui.QPainterPath()
        outline.addEllipse(self.boundingRect())
        return outline

    def paint(self, painter, option, widget):
        is_down = option.state & QtGui.QStyle.State_Sunken
        rect = self.boundingRect()

        # Outer ring: light-to-dark gradient, brightened on hover and
        # reversed while the button is held down.
        gradient = QtGui.QLinearGradient(rect.topLeft(), rect.bottomRight())
        hovered = option.state & QtGui.QStyle.State_MouseOver
        top = QtCore.Qt.white if hovered else QtCore.Qt.lightGray
        bottom = QtCore.Qt.darkGray
        if is_down:
            top, bottom = bottom, top
        gradient.setColorAt(0, top)
        gradient.setColorAt(1, bottom)
        painter.setPen(QtCore.Qt.darkGray)
        painter.setBrush(gradient)
        painter.drawEllipse(rect)

        # Inner disc: opposite gradient direction for a bevelled look.
        top = QtCore.Qt.darkGray
        bottom = QtCore.Qt.lightGray
        if is_down:
            top, bottom = bottom, top
        gradient.setColorAt(0, top)
        gradient.setColorAt(1, bottom)
        painter.setPen(QtCore.Qt.NoPen)
        painter.setBrush(gradient)
        if is_down:
            # Nudge the face to suggest depression.
            painter.translate(2, 2)
        painter.drawEllipse(rect.adjusted(5, 5, -5, -5))

        # Icon, centred on the button.
        painter.drawPixmap(-self._pix.width() / 2, -self._pix.height() / 2,
                           self._pix)

    def mousePressEvent(self, ev):
        self.pressed.emit()
        self.update()

    def mouseReleaseEvent(self, ev):
        self.update()
class View(QtGui.QGraphicsView):
    """Graphics view that keeps the whole scene visible when resized."""

    def resizeEvent(self, event):
        super(View, self).resizeEvent(event)
        self.fitInView(self.sceneRect(), QtCore.Qt.KeepAspectRatio)
if __name__ == '__main__':
    import sys
    import math

    app = QtGui.QApplication(sys.argv)

    kineticPix = QtGui.QPixmap(':/images/kinetic.png')
    bgPix = QtGui.QPixmap(':/images/Time-For-Lunch-2.jpg')

    scene = QtGui.QGraphicsScene(-350, -350, 700, 700)

    # Create 64 tile items, each centred on its own origin so the animated
    # 'pos' property places the tile's centre.
    items = []
    for i in range(64):
        item = Pixmap(kineticPix)
        item.pixmap_item.setOffset(-kineticPix.width() / 2,
                -kineticPix.height() / 2)
        item.pixmap_item.setZValue(i)
        items.append(item)
        scene.addItem(item.pixmap_item)

    # Buttons.
    buttonParent = QtGui.QGraphicsRectItem()
    ellipseButton = Button(QtGui.QPixmap(':/images/ellipse.png'), buttonParent)
    figure8Button = Button(QtGui.QPixmap(':/images/figure8.png'), buttonParent)
    randomButton = Button(QtGui.QPixmap(':/images/random.png'), buttonParent)
    tiledButton = Button(QtGui.QPixmap(':/images/tile.png'), buttonParent)
    centeredButton = Button(QtGui.QPixmap(':/images/centered.png'), buttonParent)

    ellipseButton.setPos(-100, -100)
    figure8Button.setPos(100, -100)
    randomButton.setPos(0, 0)
    tiledButton.setPos(-100, 100)
    centeredButton.setPos(100, 100)

    # Shrink the whole button cluster and keep it above the tiles (tiles use
    # z-values 0..63).
    scene.addItem(buttonParent)
    buttonParent.scale(0.75, 0.75)
    buttonParent.setPos(200, 200)
    buttonParent.setZValue(65)

    # States: one child state per tile layout.
    rootState = QtCore.QState()
    ellipseState = QtCore.QState(rootState)
    figure8State = QtCore.QState(rootState)
    randomState = QtCore.QState(rootState)
    tiledState = QtCore.QState(rootState)
    centeredState = QtCore.QState(rootState)

    # Values: each state assigns a target 'pos' to every tile.
    for i, item in enumerate(items):
        # Ellipse.
        ellipseState.assignProperty(item, 'pos',
                QtCore.QPointF(math.cos((i / 63.0) * 6.28) * 250,
                    math.sin((i / 63.0) * 6.28) * 250))

        # Figure 8.
        figure8State.assignProperty(item, 'pos',
                QtCore.QPointF(math.sin((i / 63.0) * 6.28) * 250,
                    math.sin(((i * 2)/63.0) * 6.28) * 250))

        # Random.
        randomState.assignProperty(item, 'pos',
                QtCore.QPointF(-250 + QtCore.qrand() % 500,
                    -250 + QtCore.qrand() % 500))

        # Tiled.
        tiledState.assignProperty(item, 'pos',
                QtCore.QPointF(((i % 8) - 4) * kineticPix.width() + kineticPix.width() / 2,
                    ((i // 8) - 4) * kineticPix.height() + kineticPix.height() / 2))

        # Centered.
        centeredState.assignProperty(item, 'pos', QtCore.QPointF())

    # Ui.
    view = View(scene)
    view.setWindowTitle("Animated Tiles")
    view.setViewportUpdateMode(QtGui.QGraphicsView.BoundingRectViewportUpdate)
    view.setBackgroundBrush(QtGui.QBrush(bgPix))
    view.setCacheMode(QtGui.QGraphicsView.CacheBackground)
    view.setRenderHints(
            QtGui.QPainter.Antialiasing | QtGui.QPainter.SmoothPixmapTransform)
    view.show()

    states = QtCore.QStateMachine()
    states.addState(rootState)
    states.setInitialState(rootState)
    rootState.setInitialState(centeredState)

    # One animation per tile, staggered durations, all run in parallel and
    # shared by every state transition below.
    group = QtCore.QParallelAnimationGroup()
    for i, item in enumerate(items):
        anim = QtCore.QPropertyAnimation(item, 'pos')
        anim.setDuration(750 + i * 25)
        anim.setEasingCurve(QtCore.QEasingCurve.InOutBack)
        group.addAnimation(anim)

    trans = rootState.addTransition(ellipseButton.pressed, ellipseState)
    trans.addAnimation(group)

    trans = rootState.addTransition(figure8Button.pressed, figure8State)
    trans.addAnimation(group)

    trans = rootState.addTransition(randomButton.pressed, randomState)
    trans.addAnimation(group)

    trans = rootState.addTransition(tiledButton.pressed, tiledState)
    trans.addAnimation(group)

    trans = rootState.addTransition(centeredButton.pressed, centeredState)
    trans.addAnimation(group)

    # One-shot timer kicks the machine out of the centered start state into
    # the ellipse layout shortly after startup.
    timer = QtCore.QTimer()
    timer.start(125)
    timer.setSingleShot(True)
    trans = rootState.addTransition(timer.timeout, ellipseState)
    trans.addAnimation(group)

    states.start()
    sys.exit(app.exec_())
| |
"""
Created on 2015-05-30
@author: Valtyr Farshield
"""
import re
import sqlite3 as lite
from bb_common import BbCommon
class Epistatic:
    """A static wormhole connection: its code, destination class, lifetime
    and mass limits, with optional free-form info."""

    def __init__(self, static_code, wh_class, stabletime, maxjump, maxmass, info):
        self.static_code = static_code
        self.wh_class = wh_class
        self.stabletime = stabletime
        self.maxjump = maxjump
        self.maxmass = maxmass
        self.info = info

    def __str__(self):
        # Human-readable one-line summary; info is appended only when present.
        text = ("*{0.static_code}* - Leads to: {0.wh_class}, "
                "Stable Time: {0.stabletime} hrs, "
                "Mass/Jump: {0.maxjump} kT, "
                "Max Mass: {0.maxmass} kT").format(self)
        if self.info is None:
            return text
        return text + ", Info: {}".format(self.info)
class Epiwh:
    """A single wormhole system ("J-code") and its searchable attributes."""

    # Planet type names, index-aligned with the per-system ``planets`` counts.
    planet_types = ["Temperate", "Ice", "Gas", "Oceanic", "Lava", "Barren", "Storm", "Plasma", "Shattered"]
    # Known system-wide wormhole effects.
    effect_types = ["Black Hole", "Cataclysmic Variable", "Magnetar", "No effect", "Pulsar", "Red Giant",
                    "Wolf-Rayet Star"]
    # Minimum planet combinations that qualify as "perfect P.I.".
    # [T, I, G, O, L, B, S, P, Sh]
    perfect_pi = [[1, 1, 1, 0, 1, 1, 0, 0, 0],
                  [1, 1, 1, 0, 1, 0, 0, 1, 0],
                  [1, 0, 1, 1, 1, 1, 0, 0, 0],
                  [1, 0, 1, 1, 1, 0, 0, 1, 0]]

    def __init__(self, sysId, name, wh_class, effect, radius, statics, targets, moons, planets, info):
        self.sysId = sysId          # internal Eve Id of system [private]
        self.name = name            # name of the wormhole (ex. J123450)
        self.wh_class = wh_class    # Wormhole class 1-6, 13-18
        self.effect = effect        # Wormhole effect (ex. Magnetar)
        self.radius = radius        # Distance from sun to furthest planet
        self.statics = statics      # List of statics (ex. B274 D385 Q003)
        self.targets = targets      # List of targets (ex. HS C2 NS)
        self.moons = moons          # Number of moons
        self.planets = planets      # List of planets in format [T, I, G, O, L, B, St, P, Sh]
        self.info = info            # Additional information (optional)

    # pretty print
    def __str__(self):
        """One-line summary including translated static destinations."""
        output_str = "*{}* [C{}] {}, Radius: {} AU, Moons: {}, Statics: {}".format(
            self.name,
            self.wh_class,
            self.effect,
            self.radius,
            self.moons,
            self.statics
        )
        target_list = self.__translate_statics()
        if target_list:
            output_str += " ("
            output_str += " ".join(target_list)
            output_str += ")"
        if self.info is not None:
            output_str += ", Other info: {}".format(self.info)
        return output_str

    # convert statics (e.g. B274 Y683 -> HS C4)
    def __translate_statics(self):
        """Map numeric target codes to display labels (HS/LS/NS or C<n>)."""
        target_list = []
        for target in self.targets:
            if target == Epicenter.HS_CODE:
                target_list.append("HS")
            elif target == Epicenter.LS_CODE:
                target_list.append("LS")
            elif target == Epicenter.NS_CODE:
                target_list.append("NS")
            else:
                # Anything else is a wormhole class number.
                target_list.append("C" + str(target))
        return target_list

    # returns a human readable text info on the planets of the current system
    def planet_info(self):
        """Long-form planet listing, e.g. '>Planets: Temperate: 2, Gas: 1'."""
        output_info = ">Planets: "
        planet_list = []
        for idx, planet_type in enumerate(Epiwh.planet_types):
            if self.planets[idx] != 0:
                planet_list += ["{}: {}".format(planet_type, self.planets[idx])]
        output_info += ", ".join(planet_list)
        # Check if system has perfect P.I.
        if self.__has_perfect_pi():
            output_info += " `Perfect P.I.`"
        return output_info

    # returns a human readable text info on the planets of the current system
    def compact_planet_info(self):
        """Compact planet listing using first letters, e.g. 'Planets: T-2, G-1'."""
        output_info = "Planets: "
        planet_list = []
        for idx, planet_type in enumerate(Epiwh.planet_types):
            if self.planets[idx] != 0:
                planet_list += ["{}-{}".format(planet_type[0], self.planets[idx])]
        output_info += ", ".join(planet_list)
        # Check if system has perfect P.I.
        if self.__has_perfect_pi():
            output_info += " `Perfect P.I.`"
        return output_info

    # return True if current wormhole meets the requirements of planet_list
    def __planet_match(self, planets):
        """True when this system has at least the requested count of each type."""
        for idx, planet in enumerate(planets):
            if planet > self.planets[idx]:
                return False
        return True

    # Checks if the current wormhole has perfect P.I.
    def __has_perfect_pi(self):
        """True when any of the ``perfect_pi`` combinations is satisfied."""
        for planets in Epiwh.perfect_pi:
            if self.__planet_match(planets):
                return True
        return False

    # check if this particular wormhole meets the criteria given as parameters
    def matchCrit(self, class_list, effect_list, static_params, radius_list, moon_list, planet_list, planetNr_list):
        """Return True when this wormhole satisfies every supplied filter.

        Empty filter lists are treated as "no constraint"; ``class_list`` is
        mandatory since a non-matching class short-circuits to False.
        """
        # Only proceed if wormhole classes match
        if self.wh_class in class_list:
            # process effect
            if effect_list:
                if self.effect not in effect_list:
                    return False
            # process statics
            if static_params:
                static_list = static_params[0]
                exclude = static_params[1]
                if static_list:
                    if exclude:
                        # exclude keyword detected
                        # NOTE(review): only static_list[0] is checked here,
                        # so exclusion ignores any further "or" groups —
                        # confirm this asymmetry with __computeStatics is
                        # intentional.
                        for static in static_list[0]:
                            if static in self.targets:
                                return False
                    else:
                        # do not exclude: at least one group must be fully
                        # contained in this system's targets
                        found = False
                        for statics in static_list:
                            if set(statics) <= set(self.targets):
                                found = True
                        if not found:
                            return False
            # process radius
            if radius_list:
                min_rad = radius_list[0]
                max_rad = radius_list[1]
                if self.radius > max_rad or self.radius < min_rad:
                    return False
            # process moons
            if moon_list:
                min_moons = moon_list[0]
                max_moons = moon_list[1]
                if self.moons > max_moons or self.moons < min_moons:
                    return False
            # process number of planets
            if planetNr_list:
                planets_nr = sum(self.planets)
                min_planets = planetNr_list[0]
                max_planets = planetNr_list[1]
                if planets_nr > max_planets or planets_nr < min_planets:
                    return False
            # process planets
            if planet_list:
                found = False
                for planets in planet_list:
                    if self.__planet_match(planets):
                        found = True
                if not found:
                    return False
            # everything checked out, return True
            return True
        else:
            # Wormhole class does not match
            return False
class Epicenter:
    """In-memory wormhole database: loads statics and systems from SQLite
    once at construction, then answers lookup and search queries."""

    # Separator between groups in a free-form search order.
    Delimiter = ";"
    # Sentinel target codes for known-space destinations (wormhole classes
    # use their plain numbers 1-18).
    HS_CODE = 100
    LS_CODE = 200
    NS_CODE = 300

    # constructor
    def __init__(self, db_name, table_wh, table_statics):
        """Load both tables into RAM and close the database connection.

        :param db_name: path to the SQLite epicenter database
        :param table_wh: table holding per-system wormhole data
        :param table_statics: table holding static-code data
        """
        self.db_name = db_name              # epicenter database name
        self.table_wh = table_wh            # table name where info on wormholes is stored
        self.table_statics = table_statics  # table name where info on static codes is stored
        self.__epistatics = []              # ram mirror of statics table
        self.__epiwhlist = []               # ram mirror of wormhole table

        # database connection
        self.db_con = lite.connect(self.db_name)
        self.cursor = self.db_con.cursor()

        # -----------------------------------------------------------------------------
        # load statics data
        statement = "SELECT * FROM {}".format(self.table_statics)
        result = self.cursor.execute(statement)
        for row in result:
            epix = Epistatic(row[0], row[1], int(row[2]), int(row[3]), int(row[4]), row[5])
            self.__epistatics.append(epix)

        # -----------------------------------------------------------------------------
        # load wormhole data
        statement = """SELECT SysId, Name, Class, Effect, Radius, Statics, Moons,
        Temperate, Ice, Gas, Oceanic, Lava, Barren, Storm, Plasma, Shattered, Info FROM {}""".format(self.table_wh)
        result = self.cursor.execute(statement)
        for row in result:
            planets = [row[7], row[8], row[9], row[10], row[11], row[12], row[13], row[14], row[15]]
            epiwh = Epiwh(row[0], row[1], row[2], row[3], row[4], row[5], [], row[6], planets, row[16])
            # compute static targets list by translating each static code via
            # the statics table loaded above
            if epiwh.statics.lower() != "unknown":
                for static_code in epiwh.statics.split():
                    target = self.convertStatic(static_code)
                    if target == "HS":
                        epiwh.targets.append(Epicenter.HS_CODE)
                    elif target == "LS":
                        epiwh.targets.append(Epicenter.LS_CODE)
                    elif target == "NS":
                        epiwh.targets.append(Epicenter.NS_CODE)
                    elif target[0] == "C":
                        epiwh.targets.append(int(target[1:]))
                    else:
                        # unknown target system - ignore
                        pass
            self.__epiwhlist.append(epiwh)
            # print epiwh

        # database connection not needed anymore
        self.__closeDb()

    # Get the internal system Id of the wormhole
    def getSysId(self, name):
        """Return the Eve system id for *name*, or 0 when unknown."""
        try:
            sysId = next(epiwh for epiwh in self.__epiwhlist if epiwh.name == name).sysId
        except StopIteration:
            sysId = 0
        return sysId

    # Get the class of the wormhole
    def getClass(self, name):
        """Return the wormhole class for *name*, or 0 when unknown."""
        try:
            wh_class = next(epiwh for epiwh in self.__epiwhlist if epiwh.name == name).wh_class
        except StopIteration:
            wh_class = 0
        return wh_class

    # Get information about a static code
    def getStatic(self, static_code):
        """Return a human-readable description of *static_code* (case-insensitive)."""
        static_code = static_code.upper()
        for epistatic in self.__epistatics:
            if static_code == epistatic.static_code:
                return str(epistatic)
        return "Code '{}' not found in database".format(static_code)

    # Get mass of a static code
    def static_mass(self, static_code):
        """Return [max total mass, mass per jump] in kT, or [0, 0] when unknown."""
        static_code = static_code.upper()
        for epistatic in self.__epistatics:
            if static_code == epistatic.static_code:
                return [float(epistatic.maxmass), float(epistatic.maxjump)]
        return [0, 0]

    # Find out the target system of the static code
    def convertStatic(self, static_code):
        """Return the destination (e.g. "HS" or a class) for *static_code*,
        or "Unk" when the code is not in the statics table."""
        try:
            target_sys = next(
                epistatic for epistatic in self.__epistatics if epistatic.static_code == static_code
            ).wh_class
        except StopIteration:
            target_sys = "Unk"
        return target_sys

    # Retrieve overall information on a wormhole
    def info(self, name):
        """Return the str() summary of wormhole *name*, or an error message."""
        try:
            epiwh = next(epiwh for epiwh in self.__epiwhlist if epiwh.name == name)
        except StopIteration:
            epiwh = None
        if epiwh is not None:
            output_info = str(epiwh)
        else:
            output_info = "Unknown wormhole"
        return output_info

    # Retrieve planet information on a wormhole
    def planets(self, name, display_compact=False):
        """Return the (optionally compact) planet listing for wormhole *name*."""
        try:
            epiwh = next(epiwh for epiwh in self.__epiwhlist if epiwh.name == name)
        except StopIteration:
            epiwh = None
        if epiwh is not None:
            if display_compact:
                output_info = epiwh.compact_planet_info()
            else:
                output_info = epiwh.planet_info()
        else:
            output_info = "Unknown wormhole"
        return output_info

    # Close connection to database
    def __closeDb(self):
        # closing database
        self.db_con.close()

    # -----------------------------------------------------------------------------
    # Generic Stuff
    # -----------------------------------------------------------------------------

    # Compute integer range (e.g. 4-10)
    def __computeIntRange(self, text):
        """Parse "<min>-<max>" from *text*; return [min, max] or [] if invalid."""
        min_nr = 0
        max_nr = 0
        matchObj = re.search("([0-9]+)-([0-9]+)", text, re.I)
        if matchObj:
            min_str = matchObj.group(1)
            max_str = matchObj.group(2)
            if BbCommon.represents_int(min_str) and BbCommon.represents_int(max_str):
                min_nr = int(min_str)
                max_nr = int(max_str)
        # min should be smaller than max
        if matchObj and min_nr <= max_nr:
            return [min_nr, max_nr]
        else:
            return []

    # Compute floating range (e.g 14.3-28.1)
    def __computeFloatRange(self, text):
        """Parse "<min>-<max>" floats from *text*; return [min, max] or []."""
        min_nr = 0
        max_nr = 0
        matchObj = re.search("([.0-9]+)-([.0-9]+)", text, re.I)
        if matchObj:
            min_str = matchObj.group(1)
            max_str = matchObj.group(2)
            if BbCommon.represents_float(min_str) and BbCommon.represents_float(max_str):
                min_nr = float(min_str)
                max_nr = float(max_str)
        # maximum radius should be greater than zero and min should be smaller than max
        if matchObj and max_nr != 0 and min_nr <= max_nr:
            return [min_nr, max_nr]
        else:
            return []

    # -----------------------------------------------------------------------------
    # Compute effects
    def __computeEffects(self, text):
        """Build the list of wanted effect names from *text*.

        With "exclude" present, starts from all effects and removes the
        mentioned ones; otherwise collects only the mentioned effects.
        ``effect_local`` is index-aligned with ``Epiwh.effect_types``.
        """
        effect_local = ["black hole", "cataclysmic", "magnetar", "no effect", "pulsar", "red giant", "wolf-rayet"]
        effect_list = []
        exclude = False
        if "exclude" in text:
            exclude = True
            effect_list = list(Epiwh.effect_types)
        for idx, effect in enumerate(effect_local):
            if effect in text:
                if exclude:
                    effect_list.remove(Epiwh.effect_types[idx])
                else:
                    effect_list.append(Epiwh.effect_types[idx])
        return effect_list

    # Compute statics
    def __computeStatics(self, text):
        """Parse static targets from *text* into [groups, exclude_flag].

        Each " or "-separated group becomes a sub-list of target codes
        (HS/LS/NS sentinels and/or class numbers from C<n> tokens).
        """
        static_list = []
        exclude = True if "exclude" in text else False
        for group in text.split(" or "):
            sub_list = []
            if any(substr in group for substr in ["hs", "high-sec"]):
                sub_list.append(Epicenter.HS_CODE)
            if any(substr in group for substr in ["ls", "low-sec"]):
                sub_list.append(Epicenter.LS_CODE)
            if any(substr in group for substr in ["ns", "null-sec", "nul-sec"]):
                sub_list.append(Epicenter.NS_CODE)
            class_text = re.findall("C([0-9]{1,2})", group, re.I)
            for wh_class in class_text:
                sub_list.append(int(wh_class))
            if sub_list:
                static_list.append(sub_list)
        return [static_list, exclude]

    # Compute radius
    def __computeRadius(self, text):
        """Radius filter is a float range."""
        return self.__computeFloatRange(text)

    # Compute number of moons
    def __computeNrMoons(self, text):
        """Moon-count filter is an integer range."""
        return self.__computeIntRange(text)

    # Number of planets of a given type
    def __getPlanet(self, text, pattern_list):
        """Return the count following the first matching pattern (e.g. "t-3"),
        or 0 when no pattern matches."""
        for pattern in pattern_list:
            regex_pattern = pattern + "([0-9]+)"
            matchObj = re.search(regex_pattern, text, re.I)
            if matchObj:
                nr_planets = int(matchObj.group(1))
                return nr_planets
        return 0

    # Compute planets
    def __computePlanets(self, text):
        """Parse per-type planet requirements from *text*.

        "perfect" requests the canonical perfect-P.I. combinations; otherwise
        each " or "-separated group yields one 9-element requirement vector.
        """
        planet_list = []
        # Check if perfect P.I. is wanted
        if "perfect" in text:
            planet_list = Epiwh.perfect_pi
        else:
            for group in text.split(" or "):
                planets = [0, 0, 0, 0, 0, 0, 0, 0, 0]
                planets[0] = self.__getPlanet(group, ["temperate-", "t-"])
                planets[1] = self.__getPlanet(group, ["ice-", "i-"])
                planets[2] = self.__getPlanet(group, ["gas-", "g-"])
                planets[3] = self.__getPlanet(group, ["oceanic-", "o-"])
                planets[4] = self.__getPlanet(group, ["lava-", "l-"])
                planets[5] = self.__getPlanet(group, ["barren-", "b-"])
                planets[6] = self.__getPlanet(group, ["storm-", "s-"])
                planets[7] = self.__getPlanet(group, ["plasma-", "p-"])
                planets[8] = self.__getPlanet(group, ["shattered-", "sh-"])
                if planets != [0, 0, 0, 0, 0, 0, 0, 0, 0]:
                    planet_list.append(planets)
        return planet_list

    # Compute number of planets
    def __computeNrPlanets(self, text):
        """Total-planet-count filter is an integer range."""
        return self.__computeIntRange(text)

    # -----------------------------------------------------------------------------
    # Compute jcodes from a generic order
    def computeGeneric(self, text):
        """Parse a free-form search order and return [summary, jcodes].

        The order is split on ``Delimiter``; the first group must select the
        wormhole classes, the remaining groups are optional filters.
        """
        jcodes = []  # the output
        class_list = []  # list of ordered classes (mandatory)
        effect_list = []  # list of ordered effects (optional)
        static_params = []  # list of ordered statics[can be list of lists] + exclude flag (optional)
        radius_list = []  # min and max radius (optional)
        moon_list = []  # min and max moons (optional)
        planet_list = []  # list of planets (optional)
        planetNr_list = []  # min and max planets (optional)

        text = text.lower()  # work with lower case to simplify process
        groups = text.split(Epicenter.Delimiter)  # split order into groups

        # class group (should always be the first one in group)
        if len(groups) > 0:
            if "sansha" in groups[0]:
                # hard-coded easter-egg response
                return ["Matches: 2; Processed: Sansha Override!", ['J005299', 'J010556']]
            if "all" in groups[0]:
                class_list = [1, 2, 3, 4, 5, 6, 13, 14, 15, 16, 17, 18]
            else:
                if "tripnull" in groups[0]:
                    class_list += [13]
                if "drifter" in groups[0]:
                    class_list += [14, 15, 16, 17, 18]
                class_text = re.findall("C([0-9]{1,2})", groups[0], re.I)
                for wh_class in class_text:
                    class_list.append(int(wh_class))
            # determine if a shattered wormhole is wanted
            # (shattered systems are identified by having zero moons)
            if "non-shattered" in groups[0]:
                moon_list = [1, 1000]
            elif "shattered" in groups[0]:
                moon_list = [0, 0]

        # only proceed if we determind which class (classes) bountybot should search for
        if len(class_list) > 0:
            for group in groups[1:]:
                # effect group
                if any(substr in group for substr in ["effect", "effects"]):
                    if not effect_list:
                        effect_list = self.__computeEffects(group)
                # statics group
                elif any(substr in group for substr in ["static", "statics"]):
                    if not static_params:
                        static_params = self.__computeStatics(group)
                # radius group (min, max) - default 'min'
                elif any(substr in group for substr in ["radius", "size"]):
                    if not radius_list:
                        radius_list = self.__computeRadius(group)
                # planets group
                elif any(substr in group for substr in ["planet", "planets", "p.i."]):
                    if not planet_list:
                        planet_list = self.__computePlanets(group)
                    if not planetNr_list:
                        planetNr_list = self.__computeNrPlanets(group)
                # nr. of moons group (min, max, exact) - default 'exact'
                elif any(substr in group for substr in ["moon", "moons"]):
                    if not moon_list:
                        moon_list = self.__computeNrMoons(group)
                # nothing found, must be a comment?
                else:
                    pass

        # try to match wormholes respecting the given criteria
        for epiwh in self.__epiwhlist:
            if epiwh.matchCrit(class_list, effect_list, static_params, radius_list, moon_list, planet_list,
                               planetNr_list):
                jcodes.append(epiwh.name)

        # determine which user input has been given consideration
        processed = []
        if not class_list:
            processed.append("none")
        else:
            processed.append("class")
        if effect_list:
            processed.append("effects")
        if static_params:
            if static_params[0]:
                processed.append("statics")
        if radius_list:
            processed.append("radius")
        if moon_list:
            processed.append("moons")
        if planet_list:
            processed.append("planets")
        if planetNr_list:
            processed.append("planet numbers")

        # construct result message
        matches = len(jcodes)
        processed = ", ".join(processed)
        processed += "."
        result_info = "Matches: {}; Processed: {}".format(matches, processed)

        # debugging
        # print "Class:", class_list
        # print "Effects:", effect_list
        # print "Statics:", static_params
        # print "Radius:", radius_list
        # print "Moons:", moon_list
        # print "Planets:", planet_list
        # print "Nr. Planets:", planetNr_list

        return [result_info, jcodes]
def main():
    """Development smoke test: load the database and print the Epicenter object."""
    epi = Epicenter("../../epicenter.db", "wormholes", "statics")
    # Parenthesized call form prints identically under Python 2 and also
    # parses under Python 3 (the bare `print epi` statement does not).
    print(epi)


if __name__ == '__main__':
    main()
| |
##########################################################################
#
# Copyright 2008-2010 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
"""GL tracing generator."""
import re
import sys
from trace import Tracer
from dispatch import function_pointer_type, function_pointer_value
import specs.stdapi as stdapi
import specs.glapi as glapi
import specs.glparams as glparams
from specs.glxapi import glxapi
class TypeGetter(stdapi.Visitor):
    '''Determine which glGet*v function that matches the specified type.'''

    def __init__(self, prefix='glGet', long_suffix=True, ext_suffix=''):
        self.prefix = prefix
        self.long_suffix = long_suffix
        self.ext_suffix = ext_suffix

    def visitConst(self, const):
        # A const qualifier does not change which getter applies.
        return self.visit(const.type)

    def visitAlias(self, alias):
        # Map each GL scalar type to its (getter suffix, argument type) pair.
        # The "long" suffix spells out the type name (Booleanv, Integerv, ...);
        # the short form is the single-letter variant (iv, dv, fv).
        if self.long_suffix:
            table = {
                'GLboolean': ('Booleanv', 'GLboolean'),
                'GLdouble': ('Doublev', 'GLdouble'),
                'GLfloat': ('Floatv', 'GLfloat'),
                'GLint': ('Integerv', 'GLint'),
                'GLuint': ('Integerv', 'GLint'),
                'GLsizei': ('Integerv', 'GLint'),
            }
        else:
            table = {
                'GLboolean': ('iv', 'GLint'),
                'GLdouble': ('dv', 'GLdouble'),
                'GLfloat': ('fv', 'GLfloat'),
                'GLint': ('iv', 'GLint'),
                'GLuint': ('iv', 'GLint'),
                'GLsizei': ('iv', 'GLint'),
            }
        try:
            suffix, arg_type = table[alias.expr]
        except KeyError:
            # Unknown scalar type: report it and abort, as before.
            print(alias.expr)
            assert False
        return self.prefix + suffix + self.ext_suffix, arg_type

    def visitEnum(self, enum):
        # Enums are fetched as plain integers.
        return self.visit(glapi.GLint)

    def visitBitmask(self, bitmask):
        # Bitmasks are fetched as plain integers.
        return self.visit(glapi.GLint)

    def visitOpaque(self, pointer):
        # Opaque pointers use the *Pointerv getter.
        return self.prefix + 'Pointerv' + self.ext_suffix, 'GLvoid *'
class GlTracer(Tracer):
arrays = [
("Vertex", "VERTEX"),
("Normal", "NORMAL"),
("Color", "COLOR"),
("Index", "INDEX"),
("TexCoord", "TEXTURE_COORD"),
("EdgeFlag", "EDGE_FLAG"),
("FogCoord", "FOG_COORD"),
("SecondaryColor", "SECONDARY_COLOR"),
]
arrays.reverse()
# arrays available in ES1
arrays_es1 = ("Vertex", "Normal", "Color", "TexCoord")
    def header(self, api):
        '''Emit the C++ prologue shared by all generated GL tracers:
        includes, the _get_vertex_attrib()/_need_user_arrays() helpers,
        forward declarations, bookkeeping globals, and small utility
        functions used by the per-entry-point wrappers emitted later.
        '''
        Tracer.header(self, api)
        print '#include <algorithm>'
        print
        print '#include "gltrace.hpp"'
        print
        # Which glVertexAttrib* variant to use
        print 'enum vertex_attrib {'
        print '    VERTEX_ATTRIB,'
        print '    VERTEX_ATTRIB_NV,'
        print '};'
        print
        # Helper choosing between generic (glVertexAttribPointer) and
        # NV_vertex_program (glVertexAttribPointerNV) attribute handling,
        # based on which flavor the app has used so far.
        print 'static vertex_attrib _get_vertex_attrib(void) {'
        print '    gltrace::Context *ctx = gltrace::getContext();'
        print '    if (ctx->user_arrays_nv) {'
        print '        GLboolean _vertex_program = GL_FALSE;'
        print '        _glGetBooleanv(GL_VERTEX_PROGRAM_ARB, &_vertex_program);'
        print '        if (_vertex_program) {'
        # NOTE(review): this inner user_arrays_nv test is redundant -- the
        # outer branch already established it; harmless in the generated C.
        print '            if (ctx->user_arrays_nv) {'
        print '                GLint _vertex_program_binding_nv = _glGetInteger(GL_VERTEX_PROGRAM_BINDING_NV);'
        print '                if (_vertex_program_binding_nv) {'
        print '                    return VERTEX_ATTRIB_NV;'
        print '                }'
        print '            }'
        print '        }'
        print '    }'
        print '    return VERTEX_ATTRIB;'
        print '}'
        print
        self.defineShadowBufferHelper()
        # Whether we need user arrays
        print 'static inline bool _need_user_arrays(void)'
        print '{'
        print '    gltrace::Context *ctx = gltrace::getContext();'
        print '    if (!ctx->user_arrays) {'
        print '        return false;'
        print '    }'
        print
        print '    glprofile::Profile profile = ctx->profile;'
        print '    bool es1 = profile.es() && profile.major == 1;'
        print
        # One check per conventional array: it is a "user array" when the
        # array is enabled yet no VBO is bound to it.
        for camelcase_name, uppercase_name in self.arrays:
            # in which profile is the array available?
            profile_check = 'profile.desktop()'
            if camelcase_name in self.arrays_es1:
                profile_check = '(' + profile_check + ' || es1)';
            function_name = 'gl%sPointer' % camelcase_name
            enable_name = 'GL_%s_ARRAY' % uppercase_name
            binding_name = 'GL_%s_ARRAY_BUFFER_BINDING' % uppercase_name
            print '    // %s' % function_name
            print '    if (%s) {' % profile_check
            self.array_prolog(api, uppercase_name)
            print '        if (_glIsEnabled(%s) &&' % enable_name
            print '            _glGetInteger(%s) == 0) {' % binding_name
            self.array_cleanup(api, uppercase_name)
            print '            return true;'
            print '        }'
            self.array_epilog(api, uppercase_name)
            print '    }'
            print
        print '    // ES1 does not support generic vertex attributes'
        print '    if (es1)'
        print '        return false;'
        print
        print '    vertex_attrib _vertex_attrib = _get_vertex_attrib();'
        print
        print '    // glVertexAttribPointer'
        print '    if (_vertex_attrib == VERTEX_ATTRIB) {'
        print '        GLint _max_vertex_attribs = _glGetInteger(GL_MAX_VERTEX_ATTRIBS);'
        print '        for (GLint index = 0; index < _max_vertex_attribs; ++index) {'
        print '            if (_glGetVertexAttribi(index, GL_VERTEX_ATTRIB_ARRAY_ENABLED) &&'
        print '                _glGetVertexAttribi(index, GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING) == 0) {'
        print '                return true;'
        print '            }'
        print '        }'
        print '    }'
        print
        print '    // glVertexAttribPointerNV'
        print '    if (_vertex_attrib == VERTEX_ATTRIB_NV) {'
        print '        for (GLint index = 0; index < 16; ++index) {'
        print '            if (_glIsEnabled(GL_VERTEX_ATTRIB_ARRAY0_NV + index)) {'
        print '                return true;'
        print '            }'
        print '        }'
        print '    }'
        print
        print '    return false;'
        print '}'
        print
        # Forward declarations; _trace_user_arrays is defined in footer().
        print r'static void _trace_user_arrays(GLuint count);'
        print
        print r'static void _fakeStringMarker(GLsizei len, const GLvoid * string);'
        print
        # Convenience overload taking a std::string.
        print r'static inline void'
        print r'_fakeStringMarker(const std::string &s) {'
        print r'    _fakeStringMarker(s.length(), s.data());'
        print r'}'
        print
        print '// whether glLockArraysEXT() has ever been called'
        print 'static bool _checkLockArraysEXT = false;'
        print
        # Buffer mappings
        print '// whether glMapBufferRange(GL_MAP_WRITE_BIT) has ever been called'
        print 'static bool _checkBufferMapRange = false;'
        print
        print '// whether glBufferParameteriAPPLE(GL_BUFFER_FLUSHING_UNMAP_APPLE, GL_FALSE) has ever been called'
        print 'static bool _checkBufferFlushingUnmapAPPLE = false;'
        print
        # Generate a helper function to determine whether a parameter name
        # refers to a symbolic value or not
        print 'static bool'
        print 'is_symbolic_pname(GLenum pname) {'
        print '    switch (pname) {'
        for function, type, count, name in glparams.parameters:
            if type is glapi.GLenum:
                print '    case %s:' % name
        print '        return true;'
        print '    default:'
        print '        return false;'
        print '    }'
        print '}'
        print
        # Generate a helper function to determine whether a parameter value is
        # potentially symbolic or not; i.e., if the value can be represented in
        # an enum or not
        print 'template<class T>'
        print 'static inline bool'
        print 'is_symbolic_param(T param) {'
        print '    return static_cast<T>(static_cast<GLenum>(param)) == param;'
        print '}'
        print
        # Generate a helper function to know how many elements a parameter has
        print 'static size_t'
        print '_gl_param_size(GLenum pname) {'
        print '    switch (pname) {'
        for function, type, count, name in glparams.parameters:
            # GL_PROGRAM_BINARY_FORMATS is forced to 0 elements (binaries are
            # suppressed elsewhere in this tracer).
            if name == 'GL_PROGRAM_BINARY_FORMATS':
                count = 0
            if type is not None:
                print '    case %s: return %s;' % (name, count)
        print '    default:'
        print r'        os::log("apitrace: warning: %s: unknown GLenum 0x%04X\n", __FUNCTION__, pname);'
        print '        return 1;'
        print '    }'
        print '}'
        print
        # states such as GL_UNPACK_ROW_LENGTH are not available in GLES
        print 'static inline bool'
        print 'can_unpack_subimage(void) {'
        print '    gltrace::Context *ctx = gltrace::getContext();'
        print '    return ctx->profile.desktop();'
        print '}'
        print
        # VMWX_map_buffer_debug
        print r'extern "C" PUBLIC'
        print r'void APIENTRY'
        print r'glNotifyMappedBufferRangeVMWX(const void * start, GLsizeiptr length) {'
        self.emit_memcpy('start', 'length')
        print r'}'
        print
getProcAddressFunctionNames = []
    def traceApi(self, api):
        '''Emit all entry-point wrappers.  When this API exposes a
        GetProcAddress-style function, also emit the _wrapProcAddress()
        helper that swaps driver pointers for our wrappers and provides
        fallbacks for debug entry-points the driver lacks.
        '''
        if self.getProcAddressFunctionNames:
            # Generate a function to wrap proc addresses
            getProcAddressFunction = api.getFunctionByName(self.getProcAddressFunctionNames[0])
            argType = getProcAddressFunction.args[0].type
            retType = getProcAddressFunction.type
            # Forward-declare so wrappers emitted by Tracer.traceApi can call it.
            print 'static %s _wrapProcAddress(%s procName, %s procPtr);' % (retType, argType, retType)
            print
            Tracer.traceApi(self, api)
            print 'static %s _wrapProcAddress(%s procName, %s procPtr) {' % (retType, argType, retType)
            # Provide fallback functions to missing debug functions
            print '    if (!procPtr) {'
            else_ = ''
            for function_name in self.debug_functions:
                if self.api.getFunctionByName(function_name):
                    print '        %sif (strcmp("%s", (const char *)procName) == 0) {' % (else_, function_name)
                    print '            return (%s)&%s;' % (retType, function_name)
                    print '        }'
                    else_ = 'else '
            print '        %s{' % else_
            print '            return NULL;'
            print '        }'
            print '    }'
            # Redirect known entry-points to our wrappers, stashing the real
            # pointer in the dispatch table first.
            for function in api.getAllFunctions():
                ptype = function_pointer_type(function)
                pvalue = function_pointer_value(function)
                print '    if (strcmp("%s", (const char *)procName) == 0) {' % function.name
                print '        assert(procPtr != (%s)&%s);' % (retType, function.name)
                print '        %s = (%s)procPtr;' % (pvalue, ptype)
                print '        return (%s)&%s;' % (retType, function.name,)
                print '    }'
            # Unknown names pass through untraced.
            print '    os::log("apitrace: warning: unknown function \\"%s\\"\\n", (const char *)procName);'
            print '    return procPtr;'
            print '}'
            print
        else:
            Tracer.traceApi(self, api)
def defineShadowBufferHelper(self):
print 'void _shadow_glGetBufferSubData(GLenum target, GLintptr offset,'
print ' GLsizeiptr size, GLvoid *data)'
print '{'
print ' gltrace::Context *ctx = gltrace::getContext();'
print ' if (!ctx->needsShadowBuffers() || target != GL_ELEMENT_ARRAY_BUFFER) {'
print ' _glGetBufferSubData(target, offset, size, data);'
print ' return;'
print ' }'
print
print ' GLint buffer_binding = _glGetInteger(GL_ELEMENT_ARRAY_BUFFER_BINDING);'
print ' if (buffer_binding > 0) {'
print ' gltrace::Buffer & buf = ctx->buffers[buffer_binding];'
print ' buf.getSubData(offset, size, data);'
print ' }'
print '}'
def shadowBufferMethod(self, method):
# Emit code to fetch the shadow buffer, and invoke a method
print ' gltrace::Context *ctx = gltrace::getContext();'
print ' if (ctx->needsShadowBuffers() && target == GL_ELEMENT_ARRAY_BUFFER) {'
print ' GLint buffer_binding = _glGetInteger(GL_ELEMENT_ARRAY_BUFFER_BINDING);'
print ' if (buffer_binding > 0) {'
print ' gltrace::Buffer & buf = ctx->buffers[buffer_binding];'
print ' buf.' + method + ';'
print ' }'
print ' }'
print
def shadowBufferProlog(self, function):
if function.name == 'glBufferData':
self.shadowBufferMethod('bufferData(size, data)')
if function.name == 'glBufferSubData':
self.shadowBufferMethod('bufferSubData(offset, size, data)')
if function.name == 'glDeleteBuffers':
print ' gltrace::Context *ctx = gltrace::getContext();'
print ' if (ctx->needsShadowBuffers()) {'
print ' for (GLsizei i = 0; i < n; i++) {'
print ' ctx->buffers.erase(buffer[i]);'
print ' }'
print ' }'
array_pointer_function_names = set((
"glVertexPointer",
"glNormalPointer",
"glColorPointer",
"glIndexPointer",
"glTexCoordPointer",
"glEdgeFlagPointer",
"glFogCoordPointer",
"glSecondaryColorPointer",
"glInterleavedArrays",
"glVertexPointerEXT",
"glNormalPointerEXT",
"glColorPointerEXT",
"glIndexPointerEXT",
"glTexCoordPointerEXT",
"glEdgeFlagPointerEXT",
"glFogCoordPointerEXT",
"glSecondaryColorPointerEXT",
"glVertexAttribPointer",
"glVertexAttribPointerARB",
"glVertexAttribPointerNV",
"glVertexAttribIPointer",
"glVertexAttribIPointerEXT",
"glVertexAttribLPointer",
"glVertexAttribLPointerEXT",
#"glMatrixIndexPointerARB",
))
# XXX: We currently ignore the gl*Draw*ElementArray* functions
draw_function_regex = re.compile(r'^gl([A-Z][a-z]+)*Draw(Range)?(Arrays|Elements)([A-Z][a-zA-Z]*)?$' )
interleaved_formats = [
'GL_V2F',
'GL_V3F',
'GL_C4UB_V2F',
'GL_C4UB_V3F',
'GL_C3F_V3F',
'GL_N3F_V3F',
'GL_C4F_N3F_V3F',
'GL_T2F_V3F',
'GL_T4F_V4F',
'GL_T2F_C4UB_V3F',
'GL_T2F_C3F_V3F',
'GL_T2F_N3F_V3F',
'GL_T2F_C4F_N3F_V3F',
'GL_T4F_C4F_N3F_V4F',
]
    def traceFunctionImplBody(self, function):
        '''Emit the body of the generated wrapper for *function*,
        layering GL-specific handling (user-array deferral, fake memcpys
        for buffer mappings, fake glBindAttribLocation calls, shadow
        buffer upkeep) around Tracer.traceFunctionImplBody.
        '''
        # Defer tracing of user array pointers...
        if function.name in self.array_pointer_function_names:
            print '    GLint _array_buffer = _glGetInteger(GL_ARRAY_BUFFER_BINDING);'
            print '    if (!_array_buffer) {'
            print '        static bool warned = false;'
            print '        if (!warned) {'
            print '            warned = true;'
            print '            os::log("apitrace: warning: %s: call will be faked due to pointer to user memory (https://github.com/apitrace/apitrace/blob/master/docs/BUGS.markdown#tracing)\\n", __FUNCTION__);'
            print '        }'
            print '        gltrace::Context *ctx = gltrace::getContext();'
            print '        ctx->user_arrays = true;'
            if function.name == "glVertexAttribPointerNV":
                print '        ctx->user_arrays_nv = true;'
            self.invokeFunction(function)
            # And also break down glInterleavedArrays into the individual calls
            if function.name == 'glInterleavedArrays':
                print
                # Initialize the enable flags
                for camelcase_name, uppercase_name in self.arrays:
                    flag_name = '_' + uppercase_name.lower()
                    print '        GLboolean %s = GL_FALSE;' % flag_name
                print
                # Switch for the interleaved formats
                print '        switch (format) {'
                for format in self.interleaved_formats:
                    print '            case %s:' % format
                    for camelcase_name, uppercase_name in self.arrays:
                        flag_name = '_' + uppercase_name.lower()
                        # The format token contains '_<initial>' for each
                        # array it carries (e.g. _C for COLOR).
                        if format.find('_' + uppercase_name[0]) >= 0:
                            print '                %s = GL_TRUE;' % flag_name
                    print '                break;'
                print '            default:'
                print '                return;'
                print '        }'
                print
                # Emit fake glEnableClientState/glDisableClientState flags
                for camelcase_name, uppercase_name in self.arrays:
                    flag_name = '_' + uppercase_name.lower()
                    enable_name = 'GL_%s_ARRAY' % uppercase_name
                    # Emit a fake function
                    print '        {'
                    print '            static const trace::FunctionSig &_sig = %s ? _glEnableClientState_sig : _glDisableClientState_sig;' % flag_name
                    print '            unsigned _call = trace::localWriter.beginEnter(&_sig, true);'
                    print '            trace::localWriter.beginArg(0);'
                    self.serializeValue(glapi.GLenum, enable_name)
                    print '            trace::localWriter.endArg();'
                    print '            trace::localWriter.endEnter();'
                    print '            trace::localWriter.beginLeave(_call);'
                    print '            trace::localWriter.endLeave();'
                    print '        }'
            # Warn about buggy glGet(GL_*ARRAY_SIZE) not returning GL_BGRA
            buggyFunctions = {
                'glColorPointer': ('glGetIntegerv', '', 'GL_COLOR_ARRAY_SIZE'),
                'glSecondaryColorPointer': ('glGetIntegerv', '', 'GL_SECONDARY_COLOR_ARRAY_SIZE'),
                'glVertexAttribPointer': ('glGetVertexAttribiv', 'index, ', 'GL_VERTEX_ATTRIB_ARRAY_SIZE'),
                'glVertexAttribPointerARB': ('glGetVertexAttribivARB', 'index, ', 'GL_VERTEX_ATTRIB_ARRAY_SIZE_ARB'),
            }
            if function.name in buggyFunctions:
                getter, extraArg, pname = buggyFunctions[function.name]
                print r'        static bool _checked = false;'
                print r'        if (!_checked && size == GL_BGRA) {'
                print r'            GLint _size = 0;'
                print r'            _%s(%s%s, &_size);' % (getter, extraArg, pname)
                print r'            if (_size != GL_BGRA) {'
                print r'                os::log("apitrace: warning: %s(%s) does not return GL_BGRA; trace will be incorrect (https://github.com/apitrace/apitrace/issues/261)\n");' % (getter, pname)
                print r'            }'
                print r'            _checked = true;'
                print r'        }'
            print '        return;'
            print '    }'
        # ... to the draw calls
        if self.draw_function_regex.match(function.name):
            print '    if (_need_user_arrays()) {'
            if 'Indirect' in function.name:
                print r'        os::log("apitrace: warning: %s: indirect user arrays not supported\n");' % (function.name,)
            else:
                # _<draw>_count() takes every argument but the first (mode).
                arg_names = ', '.join([arg.name for arg in function.args[1:]])
                print '        GLuint _count = _%s_count(%s);' % (function.name, arg_names)
                # Some apps, in particular Quake3, can tell the driver to lock more
                # vertices than those actually required for the draw call.
                print '        if (_checkLockArraysEXT) {'
                print '            GLuint _locked_count = _glGetInteger(GL_ARRAY_ELEMENT_LOCK_FIRST_EXT)'
                print '                                 + _glGetInteger(GL_ARRAY_ELEMENT_LOCK_COUNT_EXT);'
                print '            _count = std::max(_count, _locked_count);'
                print '        }'
                print '        _trace_user_arrays(_count);'
            print '    }'
        if function.name == 'glLockArraysEXT':
            print '    _checkLockArraysEXT = true;'
        # Warn if user arrays are used with glBegin/glArrayElement/glEnd.
        if function.name == 'glBegin':
            print r'    gltrace::Context *ctx = gltrace::getContext();'
            print r'    ctx->userArraysOnBegin = _need_user_arrays();'
        if function.name.startswith('glArrayElement'):
            print r'    gltrace::Context *ctx = gltrace::getContext();'
            print r'    if (ctx->userArraysOnBegin) {'
            print r'        os::log("apitrace: warning: user arrays with glArrayElement not supported (https://github.com/apitrace/apitrace/issues/276)\n");'
            print r'        ctx->userArraysOnBegin = false;'
            print r'    }'
        # Emit a fake memcpy on buffer uploads
        if function.name == 'glBufferParameteriAPPLE':
            print '    if (pname == GL_BUFFER_FLUSHING_UNMAP_APPLE && param == GL_FALSE) {'
            print '        _checkBufferFlushingUnmapAPPLE = true;'
            print '    }'
        if function.name in ('glUnmapBuffer', 'glUnmapBufferARB'):
            if function.name.endswith('ARB'):
                suffix = 'ARB'
            else:
                suffix = ''
            print '    GLint access_flags = 0;'
            print '    GLint access = 0;'
            print '    bool flush;'
            print '    // GLES3 does not have GL_BUFFER_ACCESS;'
            print '    if (_checkBufferMapRange) {'
            print '        _glGetBufferParameteriv%s(target, GL_BUFFER_ACCESS_FLAGS, &access_flags);' % suffix
            print '        flush = (access_flags & GL_MAP_WRITE_BIT) && !(access_flags & (GL_MAP_FLUSH_EXPLICIT_BIT | GL_MAP_PERSISTENT_BIT));'
            print '    } else {'
            print '        _glGetBufferParameteriv%s(target, GL_BUFFER_ACCESS, &access);' % suffix
            print '        flush = access != GL_READ_ONLY;'
            print '    }'
            print '    if (flush) {'
            print '        GLvoid *map = NULL;'
            print '        _glGetBufferPointerv%s(target, GL_BUFFER_MAP_POINTER, &map);' % suffix
            print '        if (map) {'
            print '            GLint length = -1;'
            print '            if (_checkBufferMapRange) {'
            print '                _glGetBufferParameteriv%s(target, GL_BUFFER_MAP_LENGTH, &length);' % suffix
            print '                if (length == -1) {'
            print '                    // Mesa drivers refuse GL_BUFFER_MAP_LENGTH without GL 3.0 up-to'
            print '                    // http://cgit.freedesktop.org/mesa/mesa/commit/?id=ffee498fb848b253a7833373fe5430f8c7ca0c5f'
            print '                    static bool warned = false;'
            print '                    if (!warned) {'
            print '                        os::log("apitrace: warning: glGetBufferParameteriv%s(GL_BUFFER_MAP_LENGTH) failed\\n");' % suffix
            print '                        warned = true;'
            print '                    }'
            print '                }'
            print '            } else {'
            print '                length = 0;'
            print '                _glGetBufferParameteriv%s(target, GL_BUFFER_SIZE, &length);' % suffix
            print '            }'
            print '            if (_checkBufferFlushingUnmapAPPLE) {'
            print '                GLint flushing_unmap = GL_TRUE;'
            print '                _glGetBufferParameteriv%s(target, GL_BUFFER_FLUSHING_UNMAP_APPLE, &flushing_unmap);' % suffix
            print '                flush = flush && flushing_unmap;'
            print '            }'
            print '            if (flush && length > 0) {'
            self.emit_memcpy('map', 'length')
            print '            }'
            print '        }'
            print '    }'
        if function.name == 'glUnmapBufferOES':
            print '    GLint access_flags = 0;'
            print '    GLint access = 0;'
            print '    bool flush;'
            print '    // GLES3 does not have GL_BUFFER_ACCESS;'
            print '    if (_checkBufferMapRange) {'
            print '        _glGetBufferParameteriv(target, GL_BUFFER_ACCESS_FLAGS, &access_flags);'
            print '        flush = (access_flags & GL_MAP_WRITE_BIT) && !(access_flags & (GL_MAP_FLUSH_EXPLICIT_BIT | GL_MAP_PERSISTENT_BIT));'
            print '    } else {'
            print '        _glGetBufferParameteriv(target, GL_BUFFER_ACCESS, &access);'
            print '        flush = access != GL_READ_ONLY;'
            print '    }'
            print '    if (flush) {'
            print '        GLvoid *map = NULL;'
            print '        _glGetBufferPointervOES(target, GL_BUFFER_MAP_POINTER, &map);'
            print '        if (map) {'
            print '            GLint length = 0;'
            print '            GLint offset = 0;'
            print '            if (_checkBufferMapRange) {'
            print '                _glGetBufferParameteriv(target, GL_BUFFER_MAP_LENGTH, &length);'
            print '                _glGetBufferParameteriv(target, GL_BUFFER_MAP_OFFSET, &offset);'
            print '            } else {'
            print '                _glGetBufferParameteriv(target, GL_BUFFER_SIZE, &length);'
            print '            }'
            print '            if (flush && length > 0) {'
            self.emit_memcpy('map', 'length')
            self.shadowBufferMethod('bufferSubData(offset, length, map)')
            print '            }'
            print '        }'
            print '    }'
        if function.name == 'glUnmapNamedBuffer':
            print '    GLint access_flags = 0;'
            print '    _glGetNamedBufferParameteriv(buffer, GL_BUFFER_ACCESS_FLAGS, &access_flags);'
            print '    if ((access_flags & GL_MAP_WRITE_BIT) &&'
            print '        !(access_flags & (GL_MAP_FLUSH_EXPLICIT_BIT | GL_MAP_PERSISTENT_BIT))) {'
            print '        GLvoid *map = NULL;'
            print '        _glGetNamedBufferPointerv(buffer, GL_BUFFER_MAP_POINTER, &map);'
            print '        GLint length = 0;'
            print '        _glGetNamedBufferParameteriv(buffer, GL_BUFFER_MAP_LENGTH, &length);'
            print '        if (map && length > 0) {'
            self.emit_memcpy('map', 'length')
            print '        }'
            print '    }'
        if function.name == 'glUnmapNamedBufferEXT':
            print '    GLint access_flags = 0;'
            print '    _glGetNamedBufferParameterivEXT(buffer, GL_BUFFER_ACCESS_FLAGS, &access_flags);'
            print '    if ((access_flags & GL_MAP_WRITE_BIT) &&'
            print '        !(access_flags & (GL_MAP_FLUSH_EXPLICIT_BIT | GL_MAP_PERSISTENT_BIT))) {'
            print '        GLvoid *map = NULL;'
            print '        _glGetNamedBufferPointervEXT(buffer, GL_BUFFER_MAP_POINTER, &map);'
            print '        GLint length = 0;'
            print '        _glGetNamedBufferParameterivEXT(buffer, GL_BUFFER_MAP_LENGTH, &length);'
            print '        if (map && length > 0) {'
            self.emit_memcpy('map', 'length')
            print '        }'
            print '    }'
        if function.name == 'glFlushMappedBufferRange':
            print '    GLvoid *map = NULL;'
            print '    _glGetBufferPointerv(target, GL_BUFFER_MAP_POINTER, &map);'
            print '    if (map && length > 0) {'
            self.emit_memcpy('(const char *)map + offset', 'length')
            print '    }'
        if function.name == 'glFlushMappedBufferRangeEXT':
            print '    GLvoid *map = NULL;'
            print '    _glGetBufferPointervOES(target, GL_BUFFER_MAP_POINTER_OES, &map);'
            print '    if (map && length > 0) {'
            self.emit_memcpy('(const char *)map + offset', 'length')
            print '    }'
        if function.name == 'glFlushMappedBufferRangeAPPLE':
            # APPLE variant names the range length "size", not "length".
            print '    GLvoid *map = NULL;'
            print '    _glGetBufferPointerv(target, GL_BUFFER_MAP_POINTER, &map);'
            print '    if (map && size > 0) {'
            self.emit_memcpy('(const char *)map + offset', 'size')
            print '    }'
        if function.name == 'glFlushMappedNamedBufferRange':
            print '    GLvoid *map = NULL;'
            print '    _glGetNamedBufferPointerv(buffer, GL_BUFFER_MAP_POINTER, &map);'
            print '    if (map && length > 0) {'
            self.emit_memcpy('(const char *)map + offset', 'length')
            print '    }'
        if function.name == 'glFlushMappedNamedBufferRangeEXT':
            print '    GLvoid *map = NULL;'
            print '    _glGetNamedBufferPointervEXT(buffer, GL_BUFFER_MAP_POINTER, &map);'
            print '    if (map && length > 0) {'
            self.emit_memcpy('(const char *)map + offset', 'length')
            print '    }'
        # FIXME: We don't support coherent/pinned memory mappings
        if function.name in ('glBufferStorage', 'glNamedBufferStorage', 'glNamedBufferStorageEXT'):
            # NOTE(review): the warning text mentions MAP_NOTIFY_EXPLICIT_BIT_VMWX
            # but the emitted condition only tests GL_MAP_PERSISTENT_BIT; verify
            # whether an enclosing NOTIFY_EXPLICIT check was intended here.
            print r'    if (!(flags & GL_MAP_PERSISTENT_BIT)) {'
            print r'        os::log("apitrace: warning: %s: MAP_NOTIFY_EXPLICIT_BIT_VMWX set w/o MAP_PERSISTENT_BIT\n", __FUNCTION__);'
            print r'    }'
            print r'    flags &= ~GL_MAP_NOTIFY_EXPLICIT_BIT_VMWX;'
        if function.name in ('glMapBufferRange', 'glMapBufferRangeEXT', 'glMapNamedBufferRange', 'glMapNamedBufferRangeEXT'):
            print r'    if (access & GL_MAP_NOTIFY_EXPLICIT_BIT_VMWX) {'
            print r'        if (!(access & GL_MAP_PERSISTENT_BIT)) {'
            print r'            os::log("apitrace: warning: %s: MAP_NOTIFY_EXPLICIT_BIT_VMWX set w/o MAP_PERSISTENT_BIT\n", __FUNCTION__);'
            print r'        }'
            print r'        if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {'
            print r'            os::log("apitrace: warning: %s: MAP_NOTIFY_EXPLICIT_BIT_VMWX set w/ MAP_FLUSH_EXPLICIT_BIT\n", __FUNCTION__);'
            print r'        }'
            print r'        access &= ~GL_MAP_NOTIFY_EXPLICIT_BIT_VMWX;'
            print r'    } else if (access & GL_MAP_COHERENT_BIT) {'
            print r'        os::log("apitrace: warning: %s: MAP_COHERENT_BIT unsupported (https://github.com/apitrace/apitrace/issues/232)\n", __FUNCTION__);'
            print r'    } else if ((access & GL_MAP_PERSISTENT_BIT) &&'
            print r'               !(access & GL_MAP_FLUSH_EXPLICIT_BIT)) {'
            print r'        os::log("apitrace: warning: %s: MAP_PERSISTENT_BIT w/o FLUSH_EXPLICIT_BIT unsupported (https://github.com/apitrace/apitrace/issues/232)\n", __FUNCTION__);'
            print r'    }'
        if function.name in ('glBufferData', 'glBufferDataARB'):
            print r'    if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) {'
            print r'        os::log("apitrace: warning: GL_AMD_pinned_memory not fully supported\n");'
            print r'    }'
        # TODO: We don't track GL_INTEL_map_texture mappings
        if function.name == 'glMapTexture2DINTEL':
            print r'    if (access & GL_MAP_WRITE_BIT) {'
            print r'        os::log("apitrace: warning: GL_INTEL_map_texture not fully supported\n");'
            print r'    }'
        # Don't leave vertex attrib locations to chance. Instead emit fake
        # glBindAttribLocation calls to ensure that the same locations will be
        # used when retracing. Trying to remap locations after the fact would
        # be an herculian task given that vertex attrib locations appear in
        # many entry-points, including non-shader related ones.
        if function.name == 'glLinkProgram':
            Tracer.invokeFunction(self, function)
            print '    GLint active_attributes = 0;'
            print '    _glGetProgramiv(program, GL_ACTIVE_ATTRIBUTES, &active_attributes);'
            print '    for (GLint attrib = 0; attrib < active_attributes; ++attrib) {'
            print '        GLint size = 0;'
            print '        GLenum type = 0;'
            print '        GLchar name[256];'
            # TODO: Use ACTIVE_ATTRIBUTE_MAX_LENGTH instead of 256
            print '        _glGetActiveAttrib(program, attrib, sizeof name, NULL, &size, &type, name);'
            # Built-in attributes (gl_*) have no user-assignable location.
            print "        if (name[0] != 'g' || name[1] != 'l' || name[2] != '_') {"
            print '            GLint location = _glGetAttribLocation(program, name);'
            print '            if (location >= 0) {'
            bind_function = glapi.glapi.getFunctionByName('glBindAttribLocation')
            self.fake_call(bind_function, ['program', 'location', 'name'])
            print '            }'
            print '        }'
            print '    }'
        if function.name == 'glLinkProgramARB':
            Tracer.invokeFunction(self, function)
            print '    GLint active_attributes = 0;'
            print '    _glGetObjectParameterivARB(programObj, GL_OBJECT_ACTIVE_ATTRIBUTES_ARB, &active_attributes);'
            print '    for (GLint attrib = 0; attrib < active_attributes; ++attrib) {'
            print '        GLint size = 0;'
            print '        GLenum type = 0;'
            print '        GLcharARB name[256];'
            # TODO: Use ACTIVE_ATTRIBUTE_MAX_LENGTH instead of 256
            print '        _glGetActiveAttribARB(programObj, attrib, sizeof name, NULL, &size, &type, name);'
            print "        if (name[0] != 'g' || name[1] != 'l' || name[2] != '_') {"
            print '            GLint location = _glGetAttribLocationARB(programObj, name);'
            print '            if (location >= 0) {'
            bind_function = glapi.glapi.getFunctionByName('glBindAttribLocationARB')
            self.fake_call(bind_function, ['programObj', 'location', 'name'])
            print '            }'
            print '        }'
            print '    }'
        self.shadowBufferProlog(function)
        Tracer.traceFunctionImplBody(self, function)
# These entrypoints are only expected to be implemented by tools;
# drivers will probably not implement them.
marker_functions = [
# GL_GREMEDY_string_marker
'glStringMarkerGREMEDY',
# GL_GREMEDY_frame_terminator
'glFrameTerminatorGREMEDY',
]
# These entrypoints may be implemented by drivers, but are also very useful
# for debugging / analysis tools.
debug_functions = [
# GL_KHR_debug
'glDebugMessageControl',
'glDebugMessageInsert',
'glDebugMessageCallback',
'glGetDebugMessageLog',
'glPushDebugGroup',
'glPopDebugGroup',
'glObjectLabel',
'glGetObjectLabel',
'glObjectPtrLabel',
'glGetObjectPtrLabel',
# GL_KHR_debug (for OpenGL ES)
'glDebugMessageControlKHR',
'glDebugMessageInsertKHR',
'glDebugMessageCallbackKHR',
'glGetDebugMessageLogKHR',
'glPushDebugGroupKHR',
'glPopDebugGroupKHR',
'glObjectLabelKHR',
'glGetObjectLabelKHR',
'glObjectPtrLabelKHR',
'glGetObjectPtrLabelKHR',
# GL_ARB_debug_output
'glDebugMessageControlARB',
'glDebugMessageInsertARB',
'glDebugMessageCallbackARB',
'glGetDebugMessageLogARB',
# GL_AMD_debug_output
'glDebugMessageEnableAMD',
'glDebugMessageInsertAMD',
'glDebugMessageCallbackAMD',
'glGetDebugMessageLogAMD',
# GL_EXT_debug_label
'glLabelObjectEXT',
'glGetObjectLabelEXT',
# GL_EXT_debug_marker
'glInsertEventMarkerEXT',
'glPushGroupMarkerEXT',
'glPopGroupMarkerEXT',
]
def invokeFunction(self, function):
if function.name in ('glLinkProgram', 'glLinkProgramARB'):
# These functions have been dispatched already
return
# Force glProgramBinary to fail. Per ARB_get_program_binary this
# should signal the app that it needs to recompile.
if function.name in ('glProgramBinary', 'glProgramBinaryOES'):
print r' binaryFormat = 0xDEADDEAD;'
print r' binary = &binaryFormat;'
print r' length = sizeof binaryFormat;'
Tracer.invokeFunction(self, function)
    def doInvokeFunction(self, function):
        '''Emit the actual driver invocation for *function*.'''
        # Same as invokeFunction() but called both when trace is enabled or disabled.
        #
        # Used to modify the behavior of GL entry-points.
        # Override GL extensions
        if function.name in ('glGetString', 'glGetIntegerv', 'glGetStringi'):
            # Dispatch through the gltrace::_*_override wrappers instead of
            # the raw driver entry-points.
            Tracer.doInvokeFunction(self, function, prefix = 'gltrace::_', suffix = '_override')
            return
        # We implement GL_GREMEDY_*, etc., and not the driver
        if function.name in self.marker_functions:
            return
        # We may be faking KHR_debug, so ensure the pointer queries result is
        # always zeroed to prevent dereference of unitialized pointers
        if function.name == 'glGetPointerv':
            print '    if (params &&'
            print '        (pname == GL_DEBUG_CALLBACK_FUNCTION ||'
            print '         pname == GL_DEBUG_CALLBACK_USER_PARAM)) {'
            print '        *params = NULL;'
            print '    }'
        if function.name in self.getProcAddressFunctionNames:
            # Serve our own VMWX helper and marker entry-points directly;
            # everything else goes through the driver then _wrapProcAddress.
            nameArg = function.args[0].name
            print '    if (strcmp("glNotifyMappedBufferRangeVMWX", (const char *)%s) == 0) {' % (nameArg,)
            print '        _result = (%s)&glNotifyMappedBufferRangeVMWX;' % (function.type,)
            for marker_function in self.marker_functions:
                if self.api.getFunctionByName(marker_function):
                    print '    } else if (strcmp("%s", (const char *)%s) == 0) {' % (marker_function, nameArg)
                    print '        _result = (%s)&%s;' % (function.type, marker_function)
            print '    } else {'
            Tracer.doInvokeFunction(self, function)
            # Replace function addresses with ours
            # XXX: Doing this here instead of wrapRet means that the trace will
            # contain the addresses of the wrapper functions, and not the real
            # functions, but in practice this should make no difference.
            if function.name in self.getProcAddressFunctionNames:
                print '        _result = _wrapProcAddress(%s, _result);' % (nameArg,)
            print '    }'
            return
        # Neuter program-binary queries (binaries are unretraceable).
        if function.name in ('glGetProgramBinary', 'glGetProgramBinaryOES'):
            print r'    bufSize = 0;'
        Tracer.doInvokeFunction(self, function)
        if function.name == 'glGetProgramiv':
            print r'    if (params && pname == GL_PROGRAM_BINARY_LENGTH) {'
            print r'        *params = 0;'
            print r'    }'
        if function.name in ('glGetProgramBinary', 'glGetProgramBinaryOES'):
            print r'    if (length) {'
            print r'        *length = 0;'
            print r'    }'
buffer_targets = [
'ARRAY_BUFFER',
'ELEMENT_ARRAY_BUFFER',
'PIXEL_PACK_BUFFER',
'PIXEL_UNPACK_BUFFER',
'UNIFORM_BUFFER',
'TEXTURE_BUFFER',
'TRANSFORM_FEEDBACK_BUFFER',
'COPY_READ_BUFFER',
'COPY_WRITE_BUFFER',
'DRAW_INDIRECT_BUFFER',
'ATOMIC_COUNTER_BUFFER',
]
def wrapRet(self, function, instance):
Tracer.wrapRet(self, function, instance)
# Keep track of buffer mappings
if function.name in ('glMapBufferRange', 'glMapBufferRangeEXT'):
print ' if (access & GL_MAP_WRITE_BIT) {'
print ' _checkBufferMapRange = true;'
print ' }'
boolean_names = [
'GL_FALSE',
'GL_TRUE',
]
def gl_boolean(self, value):
return self.boolean_names[int(bool(value))]
# Regular expression for the names of the functions that unpack from a
# pixel buffer object. See the ARB_pixel_buffer_object specification.
unpack_function_regex = re.compile(r'^gl(' + r'|'.join([
r'Bitmap',
r'PolygonStipple',
r'PixelMap[a-z]+v',
r'DrawPixels',
r'Color(Sub)?Table',
r'(Convolution|Separable)Filter[12]D',
r'(Compressed)?(Multi)?Tex(ture)?(Sub)?Image[1-4]D',
]) + r')[0-9A-Z]*$')
    def serializeArgValue(self, function, arg):
        '''Serialize one argument of *function* into the trace, handling
        PBO offsets and symbolic GLenum parameter values specially.'''
        # Recognize offsets instead of blobs when a PBO is bound
        if self.unpack_function_regex.match(function.name) \
           and (isinstance(arg.type, stdapi.Blob) \
                or (isinstance(arg.type, stdapi.Const) \
                    and isinstance(arg.type.type, stdapi.Blob))):
            print '    {'
            print '        gltrace::Context *ctx = gltrace::getContext();'
            print '        GLint _unpack_buffer = 0;'
            print '        if (ctx->profile.desktop())'
            print '            _glGetIntegerv(GL_PIXEL_UNPACK_BUFFER_BINDING, &_unpack_buffer);'
            print '        if (_unpack_buffer) {'
            # With a PBO bound the pointer is really an offset, not a blob.
            print '            trace::localWriter.writePointer((uintptr_t)%s);' % arg.name
            print '        } else {'
            Tracer.serializeArgValue(self, function, arg)
            print '        }'
            print '    }'
            return
        # Several GL state functions take GLenum symbolic names as
        # integer/floats; so dump the symbolic name whenever possible
        if function.name.startswith('gl') \
           and arg.type in (glapi.GLint, glapi.GLfloat, glapi.GLdouble) \
           and arg.name == 'param':
            # 'param' must be preceded by its 'pname' selector argument.
            assert arg.index > 0
            assert function.args[arg.index - 1].name == 'pname'
            assert function.args[arg.index - 1].type == glapi.GLenum
            print '    if (is_symbolic_pname(pname) && is_symbolic_param(%s)) {' % arg.name
            self.serializeValue(glapi.GLenum, arg.name)
            print '    } else {'
            Tracer.serializeArgValue(self, function, arg)
            print '    }'
            return
        Tracer.serializeArgValue(self, function, arg)
def footer(self, api):
Tracer.footer(self, api)
# A simple state tracker to track the pointer values
# update the state
print 'static void _trace_user_arrays(GLuint count)'
print '{'
print ' gltrace::Context *ctx = gltrace::getContext();'
print
print ' glprofile::Profile profile = ctx->profile;'
print ' bool es1 = profile.es() && profile.major == 1;'
print
# Temporarily unbind the array buffer
print ' GLint _array_buffer = _glGetInteger(GL_ARRAY_BUFFER_BINDING);'
print ' if (_array_buffer) {'
self.fake_glBindBuffer(api, 'GL_ARRAY_BUFFER', '0')
print ' }'
print
for camelcase_name, uppercase_name in self.arrays:
# in which profile is the array available?
profile_check = 'profile.desktop()'
if camelcase_name in self.arrays_es1:
profile_check = '(' + profile_check + ' || es1)';
function_name = 'gl%sPointer' % camelcase_name
enable_name = 'GL_%s_ARRAY' % uppercase_name
binding_name = 'GL_%s_ARRAY_BUFFER_BINDING' % uppercase_name
function = api.getFunctionByName(function_name)
print ' // %s' % function.prototype()
print ' if (%s) {' % profile_check
self.array_trace_prolog(api, uppercase_name)
self.array_prolog(api, uppercase_name)
print ' if (_glIsEnabled(%s)) {' % enable_name
print ' GLint _binding = _glGetInteger(%s);' % binding_name
print ' if (!_binding) {'
# Get the arguments via glGet*
for arg in function.args:
arg_get_enum = 'GL_%s_ARRAY_%s' % (uppercase_name, arg.name.upper())
arg_get_function, arg_type = TypeGetter().visit(arg.type)
print ' %s %s = 0;' % (arg_type, arg.name)
print ' _%s(%s, &%s);' % (arg_get_function, arg_get_enum, arg.name)
arg_names = ', '.join([arg.name for arg in function.args[:-1]])
print ' size_t _size = _%s_size(%s, count);' % (function.name, arg_names)
# Emit a fake function
self.array_trace_intermezzo(api, uppercase_name)
print ' unsigned _call = trace::localWriter.beginEnter(&_%s_sig, true);' % (function.name,)
for arg in function.args:
assert not arg.output
print ' trace::localWriter.beginArg(%u);' % (arg.index,)
if arg.name != 'pointer':
self.serializeValue(arg.type, arg.name)
else:
print ' trace::localWriter.writeBlob((const void *)%s, _size);' % (arg.name)
print ' trace::localWriter.endArg();'
print ' trace::localWriter.endEnter();'
print ' trace::localWriter.beginLeave(_call);'
print ' trace::localWriter.endLeave();'
print ' }'
print ' }'
self.array_epilog(api, uppercase_name)
self.array_trace_epilog(api, uppercase_name)
print ' }'
print
# Samething, but for glVertexAttribPointer*
#
# Some variants of glVertexAttribPointer alias conventional and generic attributes:
# - glVertexAttribPointer: no
# - glVertexAttribPointerARB: implementation dependent
# - glVertexAttribPointerNV: yes
#
# This means that the implementations of these functions do not always
# alias, and they need to be considered independently.
#
print ' // ES1 does not support generic vertex attributes'
print ' if (es1)'
print ' return;'
print
print ' vertex_attrib _vertex_attrib = _get_vertex_attrib();'
print
for suffix in ['', 'NV']:
if suffix:
SUFFIX = '_' + suffix
else:
SUFFIX = suffix
function_name = 'glVertexAttribPointer' + suffix
function = api.getFunctionByName(function_name)
print ' // %s' % function.prototype()
print ' if (_vertex_attrib == VERTEX_ATTRIB%s) {' % SUFFIX
if suffix == 'NV':
print ' GLint _max_vertex_attribs = 16;'
else:
print ' GLint _max_vertex_attribs = _glGetInteger(GL_MAX_VERTEX_ATTRIBS);'
print ' for (GLint index = 0; index < _max_vertex_attribs; ++index) {'
print ' GLint _enabled = 0;'
if suffix == 'NV':
print ' _glGetIntegerv(GL_VERTEX_ATTRIB_ARRAY0_NV + index, &_enabled);'
else:
print ' _glGetVertexAttribiv%s(index, GL_VERTEX_ATTRIB_ARRAY_ENABLED%s, &_enabled);' % (suffix, SUFFIX)
print ' if (_enabled) {'
print ' GLint _binding = 0;'
if suffix != 'NV':
# It doesn't seem possible to use VBOs with NV_vertex_program.
print ' _glGetVertexAttribiv%s(index, GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING%s, &_binding);' % (suffix, SUFFIX)
print ' if (!_binding) {'
# Get the arguments via glGet*
for arg in function.args[1:]:
if suffix == 'NV':
arg_get_enum = 'GL_ATTRIB_ARRAY_%s%s' % (arg.name.upper(), SUFFIX)
else:
arg_get_enum = 'GL_VERTEX_ATTRIB_ARRAY_%s%s' % (arg.name.upper(), SUFFIX)
arg_get_function, arg_type = TypeGetter('glGetVertexAttrib', False, suffix).visit(arg.type)
print ' %s %s = 0;' % (arg_type, arg.name)
print ' _%s(index, %s, &%s);' % (arg_get_function, arg_get_enum, arg.name)
arg_names = ', '.join([arg.name for arg in function.args[1:-1]])
print ' size_t _size = _%s_size(%s, count);' % (function.name, arg_names)
# Emit a fake function
print ' unsigned _call = trace::localWriter.beginEnter(&_%s_sig, true);' % (function.name,)
for arg in function.args:
assert not arg.output
print ' trace::localWriter.beginArg(%u);' % (arg.index,)
if arg.name != 'pointer':
self.serializeValue(arg.type, arg.name)
else:
print ' trace::localWriter.writeBlob((const void *)%s, _size);' % (arg.name)
print ' trace::localWriter.endArg();'
print ' trace::localWriter.endEnter();'
print ' trace::localWriter.beginLeave(_call);'
print ' trace::localWriter.endLeave();'
print ' }'
print ' }'
print ' }'
print ' }'
print
# Restore the original array_buffer
print ' if (_array_buffer) {'
self.fake_glBindBuffer(api, 'GL_ARRAY_BUFFER', '_array_buffer')
print ' }'
print
print '}'
print
# Fake glStringMarkerGREMEDY
print r'static void _fakeStringMarker(GLsizei len, const GLvoid * string) {'
glStringMarkerGREMEDY = api.getFunctionByName('glStringMarkerGREMEDY')
self.fake_call(glStringMarkerGREMEDY, ['len', 'string'])
print r'}'
#
# Hooks for glTexCoordPointer, which is identical to the other array
# pointers except the fact that it is indexed by glClientActiveTexture.
#
def array_prolog(self, api, uppercase_name):
    # Emit C code that loops over every texture unit before handling the
    # TEXTURE_COORD client-side array: unlike the other arrays it is indexed
    # by glClientActiveTexture, so each unit must be activated and inspected
    # in turn.  The saved unit is restored later by array_cleanup().
    if uppercase_name == 'TEXTURE_COORD':
        # Desktop GL and GLES expose the unit count under different enums.
        print ' GLint max_units = 0;'
        print ' if (ctx->profile.desktop())'
        print ' _glGetIntegerv(GL_MAX_TEXTURE_COORDS, &max_units);'
        print ' else'
        print ' _glGetIntegerv(GL_MAX_TEXTURE_UNITS, &max_units);'
        # Remember the currently active client texture so it can be restored.
        print ' GLint client_active_texture = GL_TEXTURE0;'
        print ' if (max_units > 0) {'
        print ' _glGetIntegerv(GL_CLIENT_ACTIVE_TEXTURE, &client_active_texture);'
        print ' }'
        # Open a do/while loop over the units; array_epilog() closes it.
        print ' GLint unit = 0;'
        print ' do {'
        print ' GLint texture = GL_TEXTURE0 + unit;'
        print ' if (max_units > 0) {'
        print ' _glClientActiveTexture(texture);'
        print ' }'
def array_trace_prolog(self, api, uppercase_name):
    # Declare a flag tracking whether a fake glClientActiveTexture call has
    # been written into the trace (set by array_trace_intermezzo()).
    if uppercase_name == 'TEXTURE_COORD':
        print ' bool client_active_texture_dirty = false;'
def array_epilog(self, api, uppercase_name):
    # Close the per-texture-unit loop opened by array_prolog() and restore
    # the application's client active texture.
    if uppercase_name == 'TEXTURE_COORD':
        print ' } while (++unit < max_units);'
    self.array_cleanup(api, uppercase_name)
def array_cleanup(self, api, uppercase_name):
    # Restore the client active texture saved by array_prolog().
    if uppercase_name == 'TEXTURE_COORD':
        print ' if (max_units > 0) {'
        print ' _glClientActiveTexture(client_active_texture);'
        print ' }'
def array_trace_intermezzo(self, api, uppercase_name):
    # Before writing a fake array call for a unit other than the current
    # one, write a fake glClientActiveTexture call so retracing selects the
    # correct unit, and mark the state dirty for array_trace_epilog().
    if uppercase_name == 'TEXTURE_COORD':
        print ' if (texture != client_active_texture || client_active_texture_dirty) {'
        print ' client_active_texture_dirty = true;'
        self.fake_glClientActiveTexture_call(api, "texture");
        print ' }'
def array_trace_epilog(self, api, uppercase_name):
    # If fake glClientActiveTexture calls were traced, write one more
    # restoring the application's original unit.
    if uppercase_name == 'TEXTURE_COORD':
        print ' if (client_active_texture_dirty) {'
        self.fake_glClientActiveTexture_call(api, "client_active_texture");
        print ' }'
def fake_glBindBuffer(self, api, target, buffer):
    # Emit a fake glBindBuffer(target, buffer) call into the trace.
    function = api.getFunctionByName('glBindBuffer')
    self.fake_call(function, [target, buffer])
def fake_glClientActiveTexture_call(self, api, texture):
    # Emit a fake glClientActiveTexture(texture) call into the trace.
    function = api.getFunctionByName('glClientActiveTexture')
    self.fake_call(function, [texture])
def emitFakeTexture2D(self):
function = glapi.glapi.getFunctionByName('glTexImage2D')
instances = function.argNames()
print ' unsigned _fake_call = trace::localWriter.beginEnter(&_%s_sig, true);' % (function.name,)
for arg in function.args:
assert not arg.output
self.serializeArg(function, arg)
print ' trace::localWriter.endEnter();'
print ' trace::localWriter.beginLeave(_fake_call);'
print ' trace::localWriter.endLeave();'
| |
from numpy import array, asarray, dtype
from numpy.random import RandomState
from numpy.testing import assert_, assert_array_equal, assert_equal
from limix._data import conform_dataset
from pandas import DataFrame, Series
from xarray import DataArray
def test_dataset_conform_dataset():
    """conform_dataset keeps matching samples intact and, when the phenotype
    index contains duplicated labels, repeats the shared samples consistently
    across y, M, G and K."""
    outcome = array([-1.2, 3.4, 0.1])
    sample_ids = ["sample{}".format(i) for i in range(len(outcome))]
    outcome = DataFrame(data=outcome, index=sample_ids)

    # Draw K, M and G in this exact order so the RandomState(0) stream
    # produces the same matrices as before.
    rng = RandomState(0)
    kinship = rng.randn(3, 4)
    kinship = DataFrame(
        data=kinship.dot(kinship.T), index=sample_ids, columns=sample_ids
    )
    covariates = DataFrame(data=rng.randn(3, 2), index=sample_ids)
    genotype = DataFrame(data=rng.randn(2, 4), index=sample_ids[:2])

    # Fully matching samples: the phenotype passes through untouched.
    result = conform_dataset(outcome, M=covariates, K=kinship)
    assert_array_equal(outcome.values, result["y"].values)

    # Duplicated sample labels: only samples present in G survive, repeated.
    stacked = array([-1.2, 3.4, 0.1, 0.1, 0.0, -0.2])
    result = conform_dataset(
        DataFrame(data=stacked, index=sample_ids + sample_ids),
        M=covariates,
        G=genotype,
        K=kinship,
    )
    assert_equal(result["y"].shape, (4, 1))
    assert_equal(result["M"].shape, (4, 2))
    assert_equal(result["G"].shape, (4, 4))
    assert_equal(result["K"].shape, (4, 4))

    expected = ["sample0", "sample1", "sample0", "sample1"]
    assert_array_equal(result["y"].sample, expected)
    assert_array_equal(result["M"].sample, expected)
    assert_array_equal(result["G"].sample, expected)
    assert_array_equal(result["K"].sample_0, expected)
    assert_array_equal(result["K"].sample_1, expected)
    assert_array_equal(result["M"].covariate, [0, 1])
    assert_array_equal(result["G"].candidate, [0, 1, 2, 3])
def test_dataset_pandas_xarray_dask():
    """to_dataarray must normalise list, numpy, dask, pandas and xarray
    inputs to a float64 DataArray of shape (3, 1), preserving index/column
    coordinates and dask chunking.

    Fix: the dask-detection expression contained a duplicated
    ``hasattr(xi, "values")`` term and relied on implicit and/or
    precedence; the duplicate is removed and grouping made explicit.
    """
    import numpy as np
    import dask.array as da
    import dask.dataframe as dd
    import pandas as pd

    from limix._data._conform import to_dataarray

    # Base inputs: a list, 1d/2d float arrays, a 2d int array, and their
    # dask twins chunked in blocks of 2.
    x = []
    x.append([1.0, 2.0, 3.0])
    x.append(np.asarray([1.0, 2.0, 3.0]))
    x.append(np.asarray([[1.0], [2.0], [3.0]]))
    x.append(np.asarray([[1], [2], [3]], dtype=int))
    x.append(da.from_array(x[0], 2))
    x.append(da.from_array(x[1], 2))
    x.append(da.from_array(x[2], 2))
    x.append(da.from_array(x[3], 2))
    n = len(x)
    # Wrap each base input in a (dask) Series; Series need 1d data, so 2d
    # inputs are ravelled first.
    for i in range(n):
        if isinstance(x[i], da.Array):
            tmp = np.asarray(x[i])
            if tmp.ndim == 2:
                tmp = tmp.ravel()
                x.append(dd.from_array(tmp))
            else:
                x.append(dd.from_array(x[i]))
        else:
            tmp = np.asarray(x[i])
            if tmp.ndim == 2:
                tmp = tmp.ravel()
                x.append(pd.Series(tmp))
            else:
                x.append(pd.Series(x[i]))
    # Also wrap the base inputs in (dask) DataFrames.
    for i in range(n):
        if isinstance(x[i], da.Array):
            x.append(dd.from_array(x[i]))
        elif isinstance(x[i], np.ndarray):
            x.append(pd.DataFrame(x[i]))
    n = len(x)
    # Finally wrap everything in DataArrays, chunked and unchunked.
    for i in range(n):
        x.append(DataArray(x[i]))
        x.append(x[-1].chunk(2))
    print()
    for xi in x:
        y = to_dataarray(xi)
        assert_equal(y.dtype, dtype("float64"))
        assert_array_equal(y.shape, (3, 1))
        assert_(isinstance(y, DataArray))
        # Index/column labels must survive as coordinates.
        if isinstance(xi, Series):
            assert_array_equal(list(xi.index), list(y.coords["dim_0"].values))
        if isinstance(xi, DataFrame):
            assert_array_equal(list(xi.columns), list(y.coords["dim_1"].values))
        # An input is dask-backed if it exposes chunks directly or through
        # its .values attribute (fixed: duplicate hasattr removed, grouping
        # made explicit).
        is_dask = (
            (hasattr(xi, "chunks") and xi.chunks is not None)
            or (
                hasattr(xi, "values")
                and hasattr(xi.values, "chunks")
                and xi.values.chunks is not None
            )
        )
        assert_equal(is_dask, y.chunks is not None)
        assert_array_equal(asarray(xi).ravel(), asarray(y).ravel())
def test_dataset_different_size():
    """When y and G disagree on the number of samples, conform_dataset trims
    both down to the shared leading samples."""
    rng = RandomState(0)

    # More phenotype samples (5) than genotype samples (3).
    n_pheno, n_geno = 5, 3
    outcome = rng.randn(n_pheno)
    sample_ids = ["sample{}".format(i) for i in range(len(outcome))]
    outcome = DataFrame(data=outcome, index=sample_ids)
    genotype = rng.randn(n_geno, 10)
    result = conform_dataset(outcome, G=genotype)
    assert_array_equal(result["y"].values, outcome[:n_geno])
    assert_array_equal(result["G"].values, genotype[:n_geno, :])

    # More genotype samples (5) than phenotype samples (3).
    n_pheno, n_geno = 3, 5
    outcome = rng.randn(n_pheno)
    sample_ids = ["sample{}".format(i) for i in range(len(outcome))]
    outcome = DataFrame(data=outcome, index=sample_ids)
    genotype = rng.randn(n_geno, 10)
    result = conform_dataset(outcome, G=genotype)
    assert_array_equal(result["y"].values, outcome[:n_pheno])
    assert_array_equal(result["G"].values, genotype[:n_pheno, :])
def test_dataset_underline_prefix():
    """conform_dataset must cope with an underscore-prefixed sample dimension
    (``_sample``), with sample labels that do not match the genotype's, and
    with a phenotype lacking sample coordinates entirely."""
    # Phenotype whose sample dimension is named "_sample".
    data = {
        "coords": {
            "trait": {"data": "gene1", "dims": (), "attrs": {}},
            "_sample": {
                "data": ["0", "1", "2", "3", "4", "5"],
                "dims": ("_sample",),
                "attrs": {},
            },
        },
        "attrs": {},
        "dims": ("_sample",),
        "data": [
            -3.7523451473100002,
            -0.421128991488,
            -0.536290093143,
            -0.9076827328799999,
            -0.251889685747,
            -0.602998035829,
        ],
        "name": "phenotype",
    }
    y = DataArray.from_dict(data)
    # Genotype with PLINK-style per-sample (fid/iid/...) and per-candidate
    # (chrom/snp/pos/...) coordinates, 7 samples x 2 candidates.
    data = {
        "coords": {
            "fid": {
                "data": [
                    "HG00111",
                    "HG00112",
                    "HG00116",
                    "HG00121",
                    "HG00133",
                    "HG00135",
                    "HG00142",
                ],
                "dims": ("sample",),
                "attrs": {},
            },
            "iid": {
                "data": [
                    "HG00111",
                    "HG00112",
                    "HG00116",
                    "HG00121",
                    "HG00133",
                    "HG00135",
                    "HG00142",
                ],
                "dims": ("sample",),
                "attrs": {},
            },
            "father": {
                "data": ["0", "0", "0", "0", "0", "0", "0"],
                "dims": ("sample",),
                "attrs": {},
            },
            "mother": {
                "data": ["0", "0", "0", "0", "0", "0", "0"],
                "dims": ("sample",),
                "attrs": {},
            },
            "gender": {
                "data": ["0", "0", "0", "0", "0", "0", "0"],
                "dims": ("sample",),
                "attrs": {},
            },
            "trait": {
                "data": ["-9", "-9", "-9", "-9", "-9", "-9", "-9"],
                "dims": ("sample",),
                "attrs": {},
            },
            "i": {"data": [0, 1], "dims": ("candidate",), "attrs": {}},
            "sample": {
                "data": [
                    "HG00111",
                    "HG00112",
                    "HG00116",
                    "HG00121",
                    "HG00133",
                    "HG00135",
                    "HG00142",
                ],
                "dims": ("sample",),
                "attrs": {},
            },
            "chrom": {"data": ["22", "22"], "dims": ("candidate",), "attrs": {}},
            "snp": {
                "data": ["rs146752890", "rs62224610"],
                "dims": ("candidate",),
                "attrs": {},
            },
            "cm": {"data": [0.0, 0.0], "dims": ("candidate",), "attrs": {}},
            "pos": {"data": [16050612, 16051347], "dims": ("candidate",), "attrs": {}},
            "a0": {"data": ["G", "C"], "dims": ("candidate",), "attrs": {}},
            "a1": {"data": ["C", "G"], "dims": ("candidate",), "attrs": {}},
            "candidate": {
                "data": ["rs146752890", "rs62224610"],
                "dims": ("candidate",),
                "attrs": {},
            },
        },
        "attrs": {},
        "dims": ("sample", "candidate"),
        "data": [
            [2.0, 0.0],
            [1.0, 2.0],
            [2.0, 2.0],
            [1.0, 2.0],
            [2.0, 1.0],
            [1.0, 1.0],
            [2.0, 2.0],
        ],
        "name": "genotype",
    }
    G = DataArray.from_dict(data)
    # The "_sample" dimension is accepted and the phenotype ends up aligned
    # against G's sample labels.
    data = conform_dataset(y, G=G)
    assert_equal(
        data["y"].coords["sample"][:3].values, ["HG00111", "HG00112", "HG00116"]
    )
    assert_equal(data["y"].shape, (6, 1))
    assert_equal(data["y"].dims, ("sample", "trait"))
    # Phenotype with a plain "sample" dimension whose labels ("0".."5") do
    # not intersect G's: the conformed dataset is empty.
    data = {
        "coords": {
            "trait": {"data": "gene1", "dims": (), "attrs": {}},
            "sample": {
                "data": ["0", "1", "2", "3", "4", "5"],
                "dims": ("sample",),
                "attrs": {},
            },
        },
        "attrs": {},
        "dims": ("sample",),
        "data": [
            -3.7523451473100002,
            -0.421128991488,
            -0.536290093143,
            -0.9076827328799999,
            -0.251889685747,
            -0.602998035829,
        ],
        "name": "phenotype",
    }
    y = DataArray.from_dict(data)
    data = conform_dataset(y, G=G)
    assert_equal(data["y"].shape, (0, 1))
    assert_equal(data["y"].dims, ("sample", "trait"))
    # Phenotype with no sample coordinate at all: sample labels are taken
    # from G.
    data = {
        "coords": {"trait": {"data": "gene1", "dims": (), "attrs": {}}},
        "attrs": {},
        "dims": ("sample",),
        "data": [
            -3.7523451473100002,
            -0.421128991488,
            -0.536290093143,
            -0.9076827328799999,
            -0.251889685747,
            -0.602998035829,
        ],
        "name": "phenotype",
    }
    y = DataArray.from_dict(data)
    data = conform_dataset(y, G=G)
    assert_equal(
        data["y"].coords["sample"][:3].values, ["HG00111", "HG00112", "HG00116"]
    )
    assert_equal(data["y"].shape, (6, 1))
    assert_equal(data["y"].dims, ("sample", "trait"))
| |
# Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
from robot import utils
from robot.errors import DataError
from robot.variables import is_list_var
from .arguments import (PythonKeywordArguments, JavaKeywordArguments,
DynamicKeywordArguments, RunKeywordArguments,
PythonInitArguments, JavaInitArguments)
from .keywords import Keywords, Keyword
from .outputcapture import OutputCapturer
from .runkwregister import RUN_KW_REGISTER
from .signalhandler import STOP_SIGNAL_MONITOR
# On Jython, keywords may be implemented in Java; Java constructors and
# methods are recognized by their PyReflected* wrapper types.  On CPython
# these predicates are constantly False.
if utils.is_jython:
    from org.python.core import PyReflectedFunction, PyReflectedConstructor

    def _is_java_init(init):
        return isinstance(init, PyReflectedConstructor)

    def _is_java_method(method):
        return hasattr(method, 'im_func') \
            and isinstance(method.im_func, PyReflectedFunction)
else:
    _is_java_init = _is_java_method = lambda item: False
def Handler(library, name, method):
    """Create the handler matching a static library keyword method.

    Run-keyword variants registered in RUN_KW_REGISTER take precedence;
    otherwise the handler type follows the implementation language.
    """
    if RUN_KW_REGISTER.is_run_keyword(library.orig_name, name):
        handler_class = _RunKeywordHandler
    elif _is_java_method(method):
        handler_class = _JavaHandler
    else:
        handler_class = _PythonHandler
    return handler_class(library, name, method)
def DynamicHandler(library, name, method, doc, argspec):
    """Create the handler for a keyword of a dynamic library."""
    is_run_kw = RUN_KW_REGISTER.is_run_keyword(library.orig_name, name)
    handler_class = _DynamicRunKeywordHandler if is_run_kw else _DynamicHandler
    return handler_class(library, name, method, doc, argspec)
def InitHandler(library, method, docgetter=None):
    """Create the handler for a library constructor."""
    if _is_java_init(method):
        init_class = _JavaInitHandler
    else:
        init_class = _PythonInitHandler
    return init_class(library, '__init__', method, docgetter)
class _BaseHandler(object):
    """Common state and naming shared by all keyword handlers."""
    type = 'library'
    _doc = ''

    def __init__(self, library, handler_name, handler_method):
        self.library = library
        self.name = utils.printable_name(handler_name, code_style=True)
        self.arguments = self._parse_arguments(handler_method)

    def _parse_arguments(self, handler_method):
        # Subclasses decide how the argument specification is extracted.
        raise NotImplementedError(self.__class__.__name__)

    @property
    def doc(self):
        return self._doc

    @property
    def shortdoc(self):
        # First line of the documentation, or '' when there is none.
        if not self.doc:
            return ''
        return self.doc.splitlines()[0]

    @property
    def longname(self):
        return '%s.%s' % (self.library.name, self.name)

    @property
    def libname(self):
        return self.library.name
class _RunnableHandler(_BaseHandler):
    """Base class for handlers that can execute a keyword.

    Wraps the raw keyword method with timeout handling, output capturing
    and stop-signal monitoring."""

    def __init__(self, library, handler_name, handler_method):
        _BaseHandler.__init__(self, library, handler_name, handler_method)
        self._handler_name = handler_name
        # For GLOBAL-scoped libraries the method is resolved once and cached;
        # otherwise it is looked up from a fresh library instance on each run.
        self._method = self._get_initial_handler(library, handler_name,
                                                 handler_method)

    def _get_initial_handler(self, library, name, method):
        if library.scope == 'GLOBAL':
            return self._get_global_handler(method, name)
        return None

    def init_keyword(self, varz):
        # Hook for subclasses; library keywords need no per-run setup.
        pass

    def run(self, context, args):
        """Execute (or dry run) the keyword within the given context."""
        if context.dry_run:
            return self._dry_run(context, args)
        return self._run(context, args)

    def _dry_run(self, context, args):
        # 'Import Library' is executed for real even in dry run so that the
        # imported keywords become available for the rest of the dry run.
        if self.longname == 'BuiltIn.Import Library':
            return self._run(context, args)
        self.arguments.check_arg_limits_for_dry_run(args)
        return None

    def _run(self, context, args):
        output = context.output
        # Resolve variables and split arguments into positional and named.
        positional, named = \
            self.arguments.resolve(args, context.get_current_vars(), output)
        runner = self._runner_for(self._current_handler(), output, positional,
                                  named, self._get_timeout(context.namespace))
        return self._run_with_output_captured_and_signal_monitor(runner, context)

    def _runner_for(self, handler, output, positional, named, timeout):
        # Wrap the call in the active timeout, if any.
        if timeout and timeout.active:
            output.debug(timeout.get_message)
            return lambda: timeout.run(handler, args=positional, kwargs=named)
        return lambda: handler(*positional, **named)

    def _run_with_output_captured_and_signal_monitor(self, runner, context):
        with OutputCapturer():
            return self._run_with_signal_monitoring(runner, context)

    def _run_with_signal_monitoring(self, runner, context):
        try:
            STOP_SIGNAL_MONITOR.start_running_keyword(context.teardown)
            return runner()
        finally:
            STOP_SIGNAL_MONITOR.stop_running_keyword()

    def _current_handler(self):
        # Cached global method, or a per-run lookup from the live instance.
        if self._method:
            return self._method
        return self._get_handler(self.library.get_instance(),
                                 self._handler_name)

    def _get_global_handler(self, method, name):
        return method

    def _get_handler(self, lib_instance, handler_name):
        return getattr(lib_instance, handler_name)

    def _get_timeout(self, namespace):
        # The smallest timeout among the active test and user keywords wins.
        timeoutable = self._get_timeoutable_items(namespace)
        if timeoutable:
            return min(item.timeout for item in timeoutable)
        return None

    def _get_timeoutable_items(self, namespace):
        items = namespace.uk_handlers[:]
        if self._test_running_and_not_in_teardown(namespace.test):
            items.append(namespace.test)
        return items

    def _test_running_and_not_in_teardown(self, test):
        # Test timeouts do not apply once the test has reached teardown.
        return test and test.status == 'RUNNING'
class _PythonHandler(_RunnableHandler):
    """Handler for keywords implemented as Python methods."""

    def __init__(self, library, handler_name, handler_method):
        _RunnableHandler.__init__(self, library, handler_name, handler_method)
        # Documentation comes from the method's docstring.
        self._doc = utils.getdoc(handler_method)

    def _parse_arguments(self, handler_method):
        return PythonKeywordArguments(handler_method, self.longname)
class _JavaHandler(_RunnableHandler):
    """Handler for keywords implemented as Java methods (Jython only)."""

    def _parse_arguments(self, handler_method):
        return JavaKeywordArguments(handler_method, self.longname)
class _DynamicHandler(_RunnableHandler):
    """Handler for keywords dispatched through a dynamic library's
    `run_keyword` method."""

    def __init__(self, library, handler_name, handler_method, doc='',
                 argspec=None):
        # Must be set before _RunnableHandler.__init__, which triggers
        # _parse_arguments().
        self._argspec = argspec
        _RunnableHandler.__init__(self, library, handler_name, handler_method)
        self._run_keyword_method_name = handler_method.__name__
        # Fix: conditional expression instead of the fragile `and/or` idiom
        # (`doc is not None and utils.unic(doc) or ''`).
        self._doc = utils.unic(doc) if doc is not None else ''

    def _parse_arguments(self, handler_method):
        return DynamicKeywordArguments(self._argspec, self.longname)

    def _get_handler(self, lib_instance, handler_name):
        # All keywords of a dynamic library run through its run_keyword method.
        runner = getattr(lib_instance, self._run_keyword_method_name)
        return self._get_dynamic_handler(runner, handler_name)

    def _get_global_handler(self, method, name):
        return self._get_dynamic_handler(method, name)

    def _get_dynamic_handler(self, runner, name):
        # Bind the keyword name so the handler matches the normal call shape.
        def handler(*args):
            return runner(name, list(args))
        return handler
class _RunKeywordHandler(_PythonHandler):
    """Handler for BuiltIn run-keyword variants.

    These keywords take other keywords as arguments, so part of the
    argument list must be left unresolved and dry run has to discover the
    keywords that would actually be executed."""

    def __init__(self, library, handler_name, handler_method):
        _PythonHandler.__init__(self, library, handler_name, handler_method)
        self._handler_method = handler_method

    def _run_with_signal_monitoring(self, runner, context):
        # With run keyword variants, only the keyword to be run can fail
        # and therefore monitoring should not raise exception yet.
        return runner()

    def _parse_arguments(self, handler_method):
        arg_index = self._get_args_to_process()
        return RunKeywordArguments(handler_method, self.longname, arg_index)

    def _get_args_to_process(self):
        # Number of leading arguments that should be resolved normally.
        return RUN_KW_REGISTER.get_args_to_process(self.library.orig_name,
                                                   self.name)

    def _get_timeout(self, namespace):
        # Timeouts apply to the executed keyword, not to the wrapper itself.
        return None

    def _dry_run(self, context, args):
        _RunnableHandler._dry_run(self, context, args)
        keywords = self._get_runnable_dry_run_keywords(context, args)
        keywords.run(context)

    def _get_runnable_dry_run_keywords(self, context, args):
        keywords = Keywords([])
        for keyword in self._get_dry_run_keywords(args):
            # Names containing variables cannot be resolved in dry run.
            if self._variable_syntax_in(keyword.name, context):
                continue
            keywords.add_keyword(keyword)
        return keywords

    def _variable_syntax_in(self, kw_name, context):
        try:
            resolved = context.namespace.variables.replace_string(kw_name)
            #Variable can contain value, but it might be wrong,
            #therefore it cannot be returned
            return resolved != kw_name
        except DataError:
            return True

    def _get_dry_run_keywords(self, args):
        # Dispatch on the wrapper keyword to extract the wrapped calls.
        if self._handler_name == 'run_keyword_if':
            return list(self._get_run_kw_if_keywords(args))
        if self._handler_name == 'run_keywords':
            return list(self._get_run_kws_keywords(args))
        if 'name' in self.arguments.names and self._get_args_to_process() > 0:
            return self._get_default_run_kw_keywords(args)
        return []

    def _get_run_kw_if_keywords(self, given_args):
        for kw_call in self._get_run_kw_if_calls(given_args):
            if kw_call:
                yield Keyword(kw_call[0], kw_call[1:])

    def _get_run_kw_if_calls(self, given_args):
        # Split the argument list on the ELSE IF / ELSE control words.
        while 'ELSE IF' in given_args:
            kw_call, given_args = self._split_run_kw_if_args(given_args, 'ELSE IF', 2)
            yield kw_call
        if 'ELSE' in given_args:
            kw_call, else_call = self._split_run_kw_if_args(given_args, 'ELSE', 1)
            yield kw_call
            yield else_call
        elif self._validate_kw_call(given_args):
            expr, kw_call = given_args[0], given_args[1:]
            if not is_list_var(expr):
                yield kw_call

    def _split_run_kw_if_args(self, given_args, control_word, required_after):
        index = given_args.index(control_word)
        expr_and_call = given_args[:index]
        remaining = given_args[index+1:]
        if not (self._validate_kw_call(expr_and_call) and
                self._validate_kw_call(remaining, required_after)):
            raise DataError("Invalid 'Run Keyword If' usage.")
        if is_list_var(expr_and_call[0]):
            # A list variable may expand to anything; nothing to validate here.
            return [], remaining
        return expr_and_call[1:], remaining

    def _validate_kw_call(self, kw_call, min_length=2):
        # A call is valid if long enough, or if a list variable may expand it.
        if len(kw_call) >= min_length:
            return True
        return any(is_list_var(item) for item in kw_call)

    def _get_run_kws_keywords(self, given_args):
        for kw_call in self._get_run_kws_calls(given_args):
            yield Keyword(kw_call[0], kw_call[1:])

    def _get_run_kws_calls(self, given_args):
        # Without AND separators each argument is a keyword of its own;
        # with them, the arguments are grouped into calls.
        if 'AND' not in given_args:
            for kw_call in given_args:
                yield [kw_call,]
        else:
            while 'AND' in given_args:
                index = given_args.index('AND')
                kw_call, given_args = given_args[:index], given_args[index + 1:]
                yield kw_call
            if given_args:
                yield given_args

    def _get_default_run_kw_keywords(self, given_args):
        # The 'name' argument marks where the wrapped keyword call starts.
        index = self.arguments.names.index('name')
        return [Keyword(given_args[index], given_args[index+1:])]
class _XTimesHandler(_RunKeywordHandler):
    """Handler for the deprecated 'X times' repeat syntax."""

    def __init__(self, handler, name):
        _RunKeywordHandler.__init__(self, handler.library, handler.name,
                                    handler._handler_method)
        self.name = name
        self._doc = "*DEPRECATED* Replace X times syntax with 'Repeat Keyword'."

    def run(self, context, args):
        # The repeat count is embedded in the keyword name and may contain
        # variables, so it is resolved before running.
        resolved_times = context.namespace.variables.replace_string(self.name)
        _RunnableHandler.run(self, context, [resolved_times] + args)

    @property
    def longname(self):
        return self.name
class _DynamicRunKeywordHandler(_DynamicHandler, _RunKeywordHandler):
    """Run-keyword variant implemented by a dynamic library.

    Combines dynamic dispatch with run-keyword argument and timeout
    handling."""
    _parse_arguments = _RunKeywordHandler._parse_arguments
    _get_timeout = _RunKeywordHandler._get_timeout
class _PythonInitHandler(_PythonHandler):
    """Handler for a Python library's constructor."""

    def __init__(self, library, handler_name, handler_method, docgetter):
        _PythonHandler.__init__(self, library, handler_name, handler_method)
        self._docgetter = docgetter

    @property
    def doc(self):
        # Documentation is fetched lazily, only on the first access.
        if self._docgetter:
            fetched = self._docgetter()
            self._doc = fetched or self._doc
            self._docgetter = None
        return self._doc

    def _parse_arguments(self, handler_method):
        return PythonInitArguments(handler_method, self.library.name)
class _JavaInitHandler(_BaseHandler):
    """Handler for a Java library's constructor (Jython only)."""

    def __init__(self, library, handler_name, handler_method, docgetter):
        _BaseHandler.__init__(self, library, handler_name, handler_method)
        self._docgetter = docgetter

    @property
    def doc(self):
        # Documentation is fetched lazily, only on the first access.
        if self._docgetter:
            fetched = self._docgetter()
            self._doc = fetched or self._doc
            self._docgetter = None
        return self._doc

    def _parse_arguments(self, handler_method):
        return JavaInitArguments(handler_method, self.library.name)
| |
# generate_date, was before in main.py
#Author: Len Feremans 8-Feb-2016
import os, csv, sys, random
import datetime
from datetime import timedelta
import math
from math import log
import occsimread, appliance, activitydat, appsimfun, bulbdat
def generate_data_range(iResidents, Dwell, iIrradianceThreshold, iRandomHouse, from_date, to_date):
    """Generate one-minute resolution appliance data for every day in
    [from_date, to_date).

    Returns a list of rows; the first row is a header beginning with
    'Timestamp', and every following row starts with the minute's timestamp.
    Rows outside [from_date, to_date] are dropped.

    Raises ValueError if from_date is after to_date.
    """
    if to_date < from_date:
        raise ValueError("from_date > to_date!")
    # One entry per simulated day.
    days = []
    cursor = from_date
    while cursor < to_date:
        days.append(cursor)
        cursor = cursor + timedelta(days=1)
    data = []
    add_header = True
    for day in days:
        is_weekend = day.weekday() in (5, 6)
        # Simulate occupancy, then the appliances, for this day.
        occupancy = occsimread.OccupanceSim(iResidents, is_weekend)
        day_data = generate_date_single_day(Dwell, occupancy, is_weekend,
                                            day.month, iIrradianceThreshold,
                                            iRandomHouse, add_header)
        # BUGFIX: datetime.replace() returns a new object; the original code
        # discarded the result, so rows did not start at midnight as the
        # hour-range check intended.  Also renamed the variable so it no
        # longer shadows the imported datetime module.
        timestamp = day.replace(hour=0, minute=0, second=0)
        if add_header:
            header = day_data.pop(0)
            header.insert(0, 'Timestamp')
            data.append(header)
            add_header = False
        for row in day_data:
            # Keep only minutes inside the requested range.
            # BUGFIX: the timestamp now advances for skipped rows too;
            # previously `continue` skipped the increment, freezing the
            # timestamp whenever a row fell outside the range.
            if from_date <= timestamp <= to_date:
                row.insert(0, timestamp)
                data.append(row)
            timestamp = timestamp + timedelta(minutes=1)
        print("Generated %s" % day)
    return data
def generate_date_single_day(Dwell, ResultofOccupancySim, bWeekend, iMonth, iIrradianceThreshold, iRandomHouse, addHeader):
#print("generate_date_single_day(%s,%s,%s,%s,%s,%s,%s" % (Dwell, ResultofOccupancySim, bWeekend, iMonth, iIrradianceThreshold, iRandomHouse, addHeader))
oMonthlyRelativeTemperatureModifier = [0, 1.63, 1.821, 1.595, 0.867, 0.763, 0.191, 0.156, 0.087, 0.399, 0.936, 1.561, 1.994]
vSimulationArray =[[0 for j in range(33)] for i in range(1442)]
# This holds result of computation: fore ach appliance power consumption for each minute of the day
for iAppliance in range(33):
testapp=100
# singleapp=9
iCycleTimeLeft = 0
iRestartDelayTimeLeft = 0
sApplianceType=appliance.appliances[iAppliance][17]
#if iAppliance==testapp: print "Appliance", sApplianceType, "is: ",
iMeanCycleLength = appliance.appliances[iAppliance][5]
#print iMeanCycleLength
iCyclesPerYear = appliance.appliances[iAppliance][4]
iStandbyPower = appliance.appliances[iAppliance][7]
iRatedPower = appliance.appliances[iAppliance][6]
dCalibration = appliance.appliances[iAppliance][20]
dOwnership = appliance.appliances[iAppliance][2]
iTargetAveragekWhYear = appliance.appliances[iAppliance][23]
sUseProfile = appliance.appliances[iAppliance][18]
iRestartDelay = appliance.appliances[iAppliance][8]
if Dwell[iAppliance] == 'NO':
bHasAppliance=False
else:
bHasAppliance=True
#bHasAppliance = IIf(Range("'appliances'!D" + CStr(iAppliance + iApplianceSourceCellOffsetY)).Value = "YES", True, False)
#if iAppliance==testapp: print "Appliance", sApplianceType, "Mean Cycle", iMeanCycleLength, "Cycles/Year ", iCyclesPerYear, "Stand By", iStandbyPower, "Rated Power", iRatedPower, "Calib", dCalibration, "UseProfile", sUseProfile, "Restart Delay", iRestartDelay
# ' Write the appliance type into result
vSimulationArray[0][iAppliance] = sApplianceType
# ' Write the units into result
vSimulationArray[1][iAppliance] = "(W)"
#
# ' Check if this appliance is assigned to this dwelling
if bHasAppliance==False:
# ' This appliance is not applicable, so write zeros to the power demand
#for iCount in range(2,1441):
# vSimulationArray[iCount][iAppliance] = 0
if iAppliance==testapp: print("ABSENT")
# for iCount in range(0,1440):
# print vSimulationArray[iCount][iAppliance]
else:
#' Randomly delay the start of appliances that have a restart delay (e.g. cold appliances with more regular intervals)
iRestartDelayTimeLeft = int(random.random()*iRestartDelay*2)
#' Weighting is 2 just to provide some diversity
#
# ' Make the rated power variable over a normal distribution to provide some variation
iRatedPower = appsimfun.GetMonteCarloNormalDistGuess(iRatedPower,iRatedPower/10)
#if iAppliance==testapp: print "PRESENT", "Restart Delay Left: ", iRestartDelayTimeLeft, "Rated Power", iRatedPower
# Lighting_Model.GetMonteCarloNormalDistGuess(Val(iRatedPower), iRatedPower / 10)
#
#
# ' Loop through each minute of the day
iMinute = 1
while iMinute < 1440:
#print "Type"
#print sApplianceType
#print "Minute:"
#print iMinute
#
# ' Set the default (standby) power demand at this time step
iPower = iStandbyPower
#' Get the ten minute period count
iTenMinuteCount = int(((iMinute - 1) // 10))+1
#print iTenMinuteCount, 10 + iTenMinuteCount
#
# ' Get the number of current active occupants for this minute
# ' Convert from 10 minute to 1 minute resolution
# print 11 + ((iMinute - 1) // 10)
iActiveOccupants = ResultofOccupancySim[iTenMinuteCount-1]
# ' Now generate a key to get the activity statistics
# sKey = IIf(bWeekend, "1", "0") + "_" + CStr(iActiveOccupants) + "_" + sUseProfile
#
if bWeekend == False:
bweek=0
else:
bweek=1
#print bweek, iActiveOccupants, sUseProfile
sKey=activitydat.KeyRowId(bweek,iActiveOccupants,sUseProfile)
#print sKey
# ' If this appliance is off having completed a cycle (ie. a restart delay)
#if iAppliance==testapp: print "Time (min)", iMinute, "Ten Min", iTenMinuteCount, "Occ: ", iActiveOccupants, "key to stats", sKey
if (iCycleTimeLeft <=0 and iRestartDelayTimeLeft > 0):
# ' Decrement the cycle time left
iRestartDelayTimeLeft = iRestartDelayTimeLeft - 1
#if iAppliance==testapp: print "CASE A: on", iCycleTimeLeft, "power", iPower, "(restart delay ", iRestartDelayTimeLeft, ")"
#print "starts on ", iRestartDelayTimeLeft, " Cycles left ", iCycleTimeLeft
elif iCycleTimeLeft <= 0:
# ' There must be active occupants, or the profile must not depend on occupancy for a start event to occur
if (iActiveOccupants > 0 and sUseProfile != "CUSTOM") or (sUseProfile == "LEVEL"):
# ' Variable to store the event probability (default to 1)
dActivityProbability = 1
# ' For appliances that depend on activity profiles and is not a custom profile ...
if (sUseProfile != "LEVEL") and (sUseProfile != "ACTIVE_OCC") and (sUseProfile != "CUSTOM"):
# ' Get the activity statistics for this profile
#oActivityStatsItem = activitydat.actid[sKey][
#oActivityStatistics.Item(sKey)
# ' Get the probability for this activity profile for this time step
# TODO dActivityProbability = oActivityStatistics(sKey).Modifiers(iTenMinuteCount)
dActivityProbability=activitydat.actid[sKey-1][2+iTenMinuteCount]
#if (iAppliance==testapp): print "CASE B: column act", iTenMinuteCount, "value", dActivityProbability
#dActivityProbability=random.random()
#print iMinute, dActivityProbability
elif sApplianceType == "ELEC_SPACE_HEATING":
#' For electric space heaters ... (excluding night storage heaters)
# ' If this appliance is an electric space heater, then then activity probability is a function of the month of the year
dActivityProbability = oMonthlyRelativeTemperatureModifier[iMonth]
if (iAppliance==testapp): print("CASE C:")
#print dActivityProbability
else:
if (iAppliance==testapp): print("CASE D:")
pass
#' Check the probability of a start event
if random.random() < (dCalibration*dActivityProbability):
#StartAppliance(sApplianceType,iMeanCycleLength,iRestartDelay):
a=[]
a=appsimfun.StartAppliance(sApplianceType,iMeanCycleLength,iRestartDelay,iRatedPower,iStandbyPower)
# print a
iCycleTimeLeft=int(a[0])
iRestartDelayTimeLeft=int(a[1])
iPower = int(a[2])
#if (iAppliance==testapp):
# print "********************Starting app:"
# print sApplianceType, " on ", iRestartDelayTimeLeft, "cycles", iCycleTimeLeft, "power", iPower
#' Custom appliance handler: storage heaters have a simple representation
elif (sUseProfile == "CUSTOM" and sApplianceType == "STORAGE_HEATER"):
pass
# ' The number of cycles (one per day) set out in the calibration sheet
# ' is used to determine whether the storage heater is used
#
# ' This model does not account for the changes in the Economy 7 time
# ' It assumes that the time starts at 00:30 each day
# if (iTenMinuteCount == 4):
# oDate = "1/14/97
# # ' Get the month and day when the storage heaters are turned on and off, using the number of cycles per year
# oDateOff = DateAdd("d", iCyclesPerYear / 2, oDate)
# oDateOn = DateAdd("d", 0 - iCyclesPerYear / 2, oDate)
# iMonthOff = DatePart("m", oDateOff)
# iMonthOn = DatePart("m", oDateOn)
# # If this is a month in which the appliance is turned on of off
# if (iMonth == iMonthOff) or (iMonth == iMonthOn):
# # ' Pick a 50% chance since this month has only a month of year resolution
# dProbability = 0.5 / 10 # ' (since there are 10 minutes in this period)
# elif (iMonth > iMonthOff) and (iMonth < iMonthOn):
# # ' The appliance is not used in summer
# dProbability = 0
# else:
# # ' The appliance is used in winter
# dProbability = 1
# if (iAppliance==testapp): print "CASE E:"
# dProbability=0.4
## #' Determine if a start event occurs
# if (random.random() <= dProbability):
# # ' This is a start event
# #StartAppliance
# if (iAppliance==testapp): print "CASE F:"
# a=[]
# a=appsimfun.StartAppliance(sApplianceType,iMeanCycleLength,iRestartDelay,iRatedPower,iStandbyPower)
# iCycleTimeLeft=a[1]
# iRestartDelayTimeLeft=a[0]
# iPower = a[2]
# #iCycleTimeLeft,iRestartDelayTimeLeft,iPower=appsimfun.StartAppliance(sApplianceType,iRestartDelay,iRatedPower,iCycleTimeLeft,iStandbyPower)
# #print iCycleTimeLeft
else:
if (iAppliance==testapp): print("Case Z", iPower)
else:
#if (iAppliance==testapp): print "Case X"
# ' The appliance is on - if the occupants become inactive, switch off the appliance
if (iActiveOccupants == 0) and (sUseProfile != "LEVEL") and (sUseProfile != "ACT_LAUNDRY") and (sUseProfile != "CUSTOM"):
if (iAppliance==testapp): print ("CASE G:", iPower)
else:
#if (iAppliance==testapp): print "CASE H:"
#' Set the power
#GetPowerUsage(sApplianceType,iRatedPower,iCycleTimeLeft,iStandbyPower)
#print iCycleTimeLeft
iPower = appsimfun.GetPowerUse(sApplianceType,iRatedPower,iCycleTimeLeft,iStandbyPower)
#if (iAppliance==testapp): print "CASE H: App is on ", iPower
#print "Wash"
#print iPower
#print iCycleTimeLeft
#' Decrement the cycle time left
iCycleTimeLeft = iCycleTimeLeft-1
#' Set the appliance power at this time step
vSimulationArray[1 + iMinute][iAppliance] = iPower
# ' Increment the time
iMinute = iMinute + 1
#print "Power", iPower
#' Write the data back to the simulation sheet
#print vSimulationArray
#def RunLightingSimulation():
#Range("light_sim_data!D5").Value = iRandomHouse
#
#' Declare an array to store the lighting unit configuration
#' vBulbArray (1,1) Example dwelling number
#' (1,2) Number of lighting units
#' (1,3..) Lighting unit ratings
#Dim vBulbArray As Variant
#
#
#' Get the bulb data
#vBulbArray = Range("bulbs!A" + CStr(iRandomHouse + 10) + ":BI" + CStr(iRandomHouse + 10))
#
#' Get the number of bulbs
iNumBulbs = bulbdat.bulbs[iRandomHouse-1][1]
#Range("light_sim_data!D3").Value = iNumBulbs
#
#' Declare an array to store the simulation data
#' vSimulation array (1,n) Bulb number header
#' (2,n) Rating
#' (3,n) Relative use
#' (4-1443,n) Demand
#Dim vSimulationArray(1 To 1443, 1 To 60) As Variant
LSimulationArray =[[0 for j in range(60)] for i in range(1444)]
OccLights=[ 0.000, 1.000, 1.52814569536424,1.69370860927152,1.98344370860927,2.09437086092715]
CummDur= [0.111111111, 0.222222222, 0.333333333, 0.444444444, 0.555555556, 0.666666667, 0.777777778, 0.888888889, 1]
LowDur= [1,2,3,5,9,17,28,50,92]
UppDur=[1,2,4,8,16,27,49,91,259]
#
#' Load the occupancy array
#Dim vOccupancyArray As Variant
#vOccupancyArray = Range("occ_sim_data!C11:C154")
#ResultofOccupancySim
#
#' Load the irradiance array
#Dim vIrradianceArray As Variant
#vIrradianceArray = Range("irradiance!C12:C1451")
#' Get the calibration scalar
fCalibrationScalar = 0.00815368639667705
#Range("light_config!F24").Value
irrdat =[0 for i in range(1440)]
datadir = os.path.dirname(__file__) + '/../data'
f = open(datadir + '/irradiance.csv', 'rt')
reader = csv.reader(f,quoting=csv.QUOTE_NONNUMERIC)
i=0
for row in reader:
# print row
irrdat[i]=float(row[iMonth-1])
i=i+1
f.close()
#print irrdat
#ocs' For each bulb
for i in range(1,iNumBulbs+1):
# ' Get the bulb rating
iRating = bulbdat.bulbs[iRandomHouse-1][i + 1]
#print iRandomHouse, i, iNumBulbs, iRating
#
# ' Store the bulb number
LSimulationArray[0][i] = i
#
# ' Store the rating
LSimulationArray[1][i] = iRating
# print LSimulationArray
#
# ' Assign a random bulb use weighting to this bulb
# ' Note that the calibration scalar is multiplied here to save processing time later
fCalibratedRelativeUseWeighting = -fCalibrationScalar * log(random.random())
LSimulationArray[2][i]= fCalibratedRelativeUseWeighting
# print i, fCalibratedRelativeUseWeighting
#
# ' Calculate the bulb usage at each minute of the day
iTime = 1
while (iTime <= 1440):
# # ' Is this bulb switched on to start with?
# # This concept is not implemented in this example.
# #' The simplified assumption is that all bulbs are off to start with.
# #
# #' Get the irradiance for this minute
#iIrradiance = irridiancedat.irrdat[iTime-1][iMonth]
iIrradiance = irrdat[iTime-1]
# print i, iTime, iIrradiance
#iTime=iTime+1
# #
# #' Get the number of current active occupants for this minute
# #' Convert from 10 minute to 1 minute resolution
# #iActiveOccupants = vOccupancyArray(((iTime - 1) \ 10) + 1, 1)
iTenMinuteCount = int(((iTime - 1) // 10))+1
# #print iTenMinuteCount, 10 + iTenMinuteCount
# #
# # ' Get the number of current active occupants for this minute
# # ' Convert from 10 minute to 1 minute resolution
# # print 11 + ((iMinute - 1) // 10)
iActiveOccupants = ResultofOccupancySim[iTenMinuteCount-1]
# #' Determine if the bulb switch-on condition is passed
# #' ie. Insuffient irradiance and at least one active occupant
# #' There is a 5% chance of switch on event if the irradiance is above the threshold
# #Dim bLowIrradiance As Boolean
if (iIrradiance < iIrradianceThreshold) or (random.random() < 0.05):
bLowIrradiance=True
else:
bLowIrradiance=False
# #bLowIrradiance = ((iIrradiance < iIrradianceThreshold) Or (Rnd() < 0.05))
# #
# #' Get the effective occupancy for this number of active occupants to allow for sharing
fEffectiveOccupancy = OccLights[iActiveOccupants]#
# print i, iTime, iActiveOccupants, fEffectiveOccupancy
# iTime=iTime+1
# ' Check the probability of a switch on at this time
if (bLowIrradiance and (random.random() < (fEffectiveOccupancy * fCalibratedRelativeUseWeighting))): #Then
# #
# # ' This is a switch on event
# #
# #' Determine how long this bulb is on for
r1 = random.random()
cml = 0
for j in range(1,10):
# #' Get the cumulative probability of this duration
cml = CummDur[j]#
# #' Check to see if this is the type of light
# print r1, cml
if r1 < cml:
# #' Get the durations
# iLowerDuration = Range("light_config!C" + CStr(54 + j)).Value
# iUpperDuration = Range("light_config!D" + CStr(54 + j)).Value
# #
# #' Get another random number
r2 = random.random()
# #
# #' Guess a duration in this range
iLightDuration = int((r2 * (UppDur[j] - LowDur[j])) + LowDur[j])
break
#
for j in range(1,iLightDuration):
if iTime > 1440: break
#
# #' Get the number of current active occupants for this minute
# iActiveOccupants = vOccupancyArray(((iTime - 1) \ 10) + 1, 1)
#
# #' If there are no active occupants, turn off the light
if iActiveOccupants == 0: break
#
# #' Store the demand
#print i, 2+iTime
LSimulationArray[2 + iTime][i] = iRating
# print iTime, "on"
# #' Increment the time
iTime = iTime + 1
#
else:
# #' The bulb remains off
LSimulationArray[2 + iTime][i]= 0
#print iTime, "off"
# #' Increment the time
iTime = iTime + 1
#
#
#' Write the simulation data to the sheet
#Range("light_sim_data!E9:BL1451") = vSimulationArray
#
LSimRes =[0 for i in range(1,1441)]
for i in range(3,1443):
res=0
for j in range(1,iNumBulbs+1):
res=res+ LSimulationArray[i][j]
LSimRes[i-3]=res
# print i-2, LSimRes[i-3]
#make output
#array for each minute of day starting from 00.00 until 23.59 (24 * 60 = 1440)
#for each device that is active in household + for total lights
header = ["Total","Lights"]
active_devices = []
for j in range(0,33):
if Dwell[j] == "YES":
header.append(appliance.appliances[j][0])
active_devices.append(j)
start = 0
end = 1440
TotalArray = [[0 for j in range(len(header))] for i in range(1440)]
if addHeader:
TotalArray = [[0 for j in range(len(header))] for i in range(1441)]
TotalArray[0] = header
start = 1
end = 1441
for i in range(start,end):
sim_lights_idx = i-start
sim_device_idx = i+2-start
TotalArray[i][1] = LSimRes[sim_lights_idx] #lights
for idx in range(len(active_devices)): #active devices
device_use = vSimulationArray[sim_device_idx][active_devices[idx]]
TotalArray[i][idx+2] = device_use
total=LSimRes[sim_lights_idx]
for idx in range(len(active_devices)): #active devices
total += vSimulationArray[sim_device_idx][active_devices[idx]]
TotalArray[i][0]=total
foefel_factor = 1.0/20.0
    # Why? Currently the monthly average is around 20,000 kWh; we want it to be around 1,000 kWh.
for i in range(start,end):
for j in range(len(TotalArray[i])):
TotalArray[i][j]=TotalArray[i][j] * foefel_factor
return TotalArray
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AddressSpace
from ._models_py3 import ApplicationGateway
from ._models_py3 import ApplicationGatewayAuthenticationCertificate
from ._models_py3 import ApplicationGatewayAvailableWafRuleSetsResult
from ._models_py3 import ApplicationGatewayBackendAddress
from ._models_py3 import ApplicationGatewayBackendAddressPool
from ._models_py3 import ApplicationGatewayBackendHealth
from ._models_py3 import ApplicationGatewayBackendHealthHttpSettings
from ._models_py3 import ApplicationGatewayBackendHealthPool
from ._models_py3 import ApplicationGatewayBackendHealthServer
from ._models_py3 import ApplicationGatewayBackendHttpSettings
from ._models_py3 import ApplicationGatewayConnectionDraining
from ._models_py3 import ApplicationGatewayFirewallDisabledRuleGroup
from ._models_py3 import ApplicationGatewayFirewallRule
from ._models_py3 import ApplicationGatewayFirewallRuleGroup
from ._models_py3 import ApplicationGatewayFirewallRuleSet
from ._models_py3 import ApplicationGatewayFrontendIPConfiguration
from ._models_py3 import ApplicationGatewayFrontendPort
from ._models_py3 import ApplicationGatewayHttpListener
from ._models_py3 import ApplicationGatewayIPConfiguration
from ._models_py3 import ApplicationGatewayListResult
from ._models_py3 import ApplicationGatewayPathRule
from ._models_py3 import ApplicationGatewayProbe
from ._models_py3 import ApplicationGatewayRequestRoutingRule
from ._models_py3 import ApplicationGatewaySku
from ._models_py3 import ApplicationGatewaySslCertificate
from ._models_py3 import ApplicationGatewaySslPolicy
from ._models_py3 import ApplicationGatewayUrlPathMap
from ._models_py3 import ApplicationGatewayWebApplicationFirewallConfiguration
from ._models_py3 import AuthorizationListResult
from ._models_py3 import AzureAsyncOperationResult
from ._models_py3 import BGPCommunity
from ._models_py3 import BackendAddressPool
from ._models_py3 import BgpPeerStatus
from ._models_py3 import BgpPeerStatusListResult
from ._models_py3 import BgpServiceCommunity
from ._models_py3 import BgpServiceCommunityListResult
from ._models_py3 import BgpSettings
from ._models_py3 import ConnectionResetSharedKey
from ._models_py3 import ConnectionSharedKey
from ._models_py3 import ConnectivityDestination
from ._models_py3 import ConnectivityHop
from ._models_py3 import ConnectivityInformation
from ._models_py3 import ConnectivityIssue
from ._models_py3 import ConnectivityParameters
from ._models_py3 import ConnectivitySource
from ._models_py3 import DhcpOptions
from ._models_py3 import DnsNameAvailabilityResult
from ._models_py3 import EffectiveNetworkSecurityGroup
from ._models_py3 import EffectiveNetworkSecurityGroupAssociation
from ._models_py3 import EffectiveNetworkSecurityGroupListResult
from ._models_py3 import EffectiveNetworkSecurityRule
from ._models_py3 import EffectiveRoute
from ._models_py3 import EffectiveRouteListResult
from ._models_py3 import Error
from ._models_py3 import ErrorDetails
from ._models_py3 import ExpressRouteCircuit
from ._models_py3 import ExpressRouteCircuitArpTable
from ._models_py3 import ExpressRouteCircuitAuthorization
from ._models_py3 import ExpressRouteCircuitListResult
from ._models_py3 import ExpressRouteCircuitPeering
from ._models_py3 import ExpressRouteCircuitPeeringConfig
from ._models_py3 import ExpressRouteCircuitPeeringListResult
from ._models_py3 import ExpressRouteCircuitRoutesTable
from ._models_py3 import ExpressRouteCircuitRoutesTableSummary
from ._models_py3 import ExpressRouteCircuitServiceProviderProperties
from ._models_py3 import ExpressRouteCircuitSku
from ._models_py3 import ExpressRouteCircuitStats
from ._models_py3 import ExpressRouteCircuitsArpTableListResult
from ._models_py3 import ExpressRouteCircuitsRoutesTableListResult
from ._models_py3 import ExpressRouteCircuitsRoutesTableSummaryListResult
from ._models_py3 import ExpressRouteServiceProvider
from ._models_py3 import ExpressRouteServiceProviderBandwidthsOffered
from ._models_py3 import ExpressRouteServiceProviderListResult
from ._models_py3 import FlowLogInformation
from ._models_py3 import FlowLogStatusParameters
from ._models_py3 import FrontendIPConfiguration
from ._models_py3 import GatewayRoute
from ._models_py3 import GatewayRouteListResult
from ._models_py3 import IPAddressAvailabilityResult
from ._models_py3 import IPConfiguration
from ._models_py3 import InboundNatPool
from ._models_py3 import InboundNatRule
from ._models_py3 import IpsecPolicy
from ._models_py3 import Ipv6ExpressRouteCircuitPeeringConfig
from ._models_py3 import LoadBalancer
from ._models_py3 import LoadBalancerListResult
from ._models_py3 import LoadBalancingRule
from ._models_py3 import LocalNetworkGateway
from ._models_py3 import LocalNetworkGatewayListResult
from ._models_py3 import NetworkInterface
from ._models_py3 import NetworkInterfaceAssociation
from ._models_py3 import NetworkInterfaceDnsSettings
from ._models_py3 import NetworkInterfaceIPConfiguration
from ._models_py3 import NetworkInterfaceListResult
from ._models_py3 import NetworkSecurityGroup
from ._models_py3 import NetworkSecurityGroupListResult
from ._models_py3 import NetworkWatcher
from ._models_py3 import NetworkWatcherListResult
from ._models_py3 import NextHopParameters
from ._models_py3 import NextHopResult
from ._models_py3 import OutboundNatRule
from ._models_py3 import PacketCapture
from ._models_py3 import PacketCaptureFilter
from ._models_py3 import PacketCaptureListResult
from ._models_py3 import PacketCaptureParameters
from ._models_py3 import PacketCaptureQueryStatusResult
from ._models_py3 import PacketCaptureResult
from ._models_py3 import PacketCaptureResultProperties
from ._models_py3 import PacketCaptureStorageLocation
from ._models_py3 import PatchRouteFilter
from ._models_py3 import PatchRouteFilterRule
from ._models_py3 import Probe
from ._models_py3 import PublicIPAddress
from ._models_py3 import PublicIPAddressDnsSettings
from ._models_py3 import PublicIPAddressListResult
from ._models_py3 import QueryTroubleshootingParameters
from ._models_py3 import Resource
from ._models_py3 import ResourceNavigationLink
from ._models_py3 import RetentionPolicyParameters
from ._models_py3 import Route
from ._models_py3 import RouteFilter
from ._models_py3 import RouteFilterListResult
from ._models_py3 import RouteFilterRule
from ._models_py3 import RouteFilterRuleListResult
from ._models_py3 import RouteListResult
from ._models_py3 import RouteTable
from ._models_py3 import RouteTableListResult
from ._models_py3 import SecurityGroupNetworkInterface
from ._models_py3 import SecurityGroupViewParameters
from ._models_py3 import SecurityGroupViewResult
from ._models_py3 import SecurityRule
from ._models_py3 import SecurityRuleAssociations
from ._models_py3 import SecurityRuleListResult
from ._models_py3 import SubResource
from ._models_py3 import Subnet
from ._models_py3 import SubnetAssociation
from ._models_py3 import SubnetListResult
from ._models_py3 import Topology
from ._models_py3 import TopologyAssociation
from ._models_py3 import TopologyParameters
from ._models_py3 import TopologyResource
from ._models_py3 import TroubleshootingDetails
from ._models_py3 import TroubleshootingParameters
from ._models_py3 import TroubleshootingRecommendedActions
from ._models_py3 import TroubleshootingResult
from ._models_py3 import TunnelConnectionHealth
from ._models_py3 import Usage
from ._models_py3 import UsageName
from ._models_py3 import UsagesListResult
from ._models_py3 import VerificationIPFlowParameters
from ._models_py3 import VerificationIPFlowResult
from ._models_py3 import VirtualNetwork
from ._models_py3 import VirtualNetworkGateway
from ._models_py3 import VirtualNetworkGatewayConnection
from ._models_py3 import VirtualNetworkGatewayConnectionListResult
from ._models_py3 import VirtualNetworkGatewayIPConfiguration
from ._models_py3 import VirtualNetworkGatewayListResult
from ._models_py3 import VirtualNetworkGatewaySku
from ._models_py3 import VirtualNetworkListResult
from ._models_py3 import VirtualNetworkListUsageResult
from ._models_py3 import VirtualNetworkPeering
from ._models_py3 import VirtualNetworkPeeringListResult
from ._models_py3 import VirtualNetworkUsage
from ._models_py3 import VirtualNetworkUsageName
from ._models_py3 import VpnClientConfiguration
from ._models_py3 import VpnClientParameters
from ._models_py3 import VpnClientRevokedCertificate
from ._models_py3 import VpnClientRootCertificate
except (SyntaxError, ImportError):
from ._models import AddressSpace # type: ignore
from ._models import ApplicationGateway # type: ignore
from ._models import ApplicationGatewayAuthenticationCertificate # type: ignore
from ._models import ApplicationGatewayAvailableWafRuleSetsResult # type: ignore
from ._models import ApplicationGatewayBackendAddress # type: ignore
from ._models import ApplicationGatewayBackendAddressPool # type: ignore
from ._models import ApplicationGatewayBackendHealth # type: ignore
from ._models import ApplicationGatewayBackendHealthHttpSettings # type: ignore
from ._models import ApplicationGatewayBackendHealthPool # type: ignore
from ._models import ApplicationGatewayBackendHealthServer # type: ignore
from ._models import ApplicationGatewayBackendHttpSettings # type: ignore
from ._models import ApplicationGatewayConnectionDraining # type: ignore
from ._models import ApplicationGatewayFirewallDisabledRuleGroup # type: ignore
from ._models import ApplicationGatewayFirewallRule # type: ignore
from ._models import ApplicationGatewayFirewallRuleGroup # type: ignore
from ._models import ApplicationGatewayFirewallRuleSet # type: ignore
from ._models import ApplicationGatewayFrontendIPConfiguration # type: ignore
from ._models import ApplicationGatewayFrontendPort # type: ignore
from ._models import ApplicationGatewayHttpListener # type: ignore
from ._models import ApplicationGatewayIPConfiguration # type: ignore
from ._models import ApplicationGatewayListResult # type: ignore
from ._models import ApplicationGatewayPathRule # type: ignore
from ._models import ApplicationGatewayProbe # type: ignore
from ._models import ApplicationGatewayRequestRoutingRule # type: ignore
from ._models import ApplicationGatewaySku # type: ignore
from ._models import ApplicationGatewaySslCertificate # type: ignore
from ._models import ApplicationGatewaySslPolicy # type: ignore
from ._models import ApplicationGatewayUrlPathMap # type: ignore
from ._models import ApplicationGatewayWebApplicationFirewallConfiguration # type: ignore
from ._models import AuthorizationListResult # type: ignore
from ._models import AzureAsyncOperationResult # type: ignore
from ._models import BGPCommunity # type: ignore
from ._models import BackendAddressPool # type: ignore
from ._models import BgpPeerStatus # type: ignore
from ._models import BgpPeerStatusListResult # type: ignore
from ._models import BgpServiceCommunity # type: ignore
from ._models import BgpServiceCommunityListResult # type: ignore
from ._models import BgpSettings # type: ignore
from ._models import ConnectionResetSharedKey # type: ignore
from ._models import ConnectionSharedKey # type: ignore
from ._models import ConnectivityDestination # type: ignore
from ._models import ConnectivityHop # type: ignore
from ._models import ConnectivityInformation # type: ignore
from ._models import ConnectivityIssue # type: ignore
from ._models import ConnectivityParameters # type: ignore
from ._models import ConnectivitySource # type: ignore
from ._models import DhcpOptions # type: ignore
from ._models import DnsNameAvailabilityResult # type: ignore
from ._models import EffectiveNetworkSecurityGroup # type: ignore
from ._models import EffectiveNetworkSecurityGroupAssociation # type: ignore
from ._models import EffectiveNetworkSecurityGroupListResult # type: ignore
from ._models import EffectiveNetworkSecurityRule # type: ignore
from ._models import EffectiveRoute # type: ignore
from ._models import EffectiveRouteListResult # type: ignore
from ._models import Error # type: ignore
from ._models import ErrorDetails # type: ignore
from ._models import ExpressRouteCircuit # type: ignore
from ._models import ExpressRouteCircuitArpTable # type: ignore
from ._models import ExpressRouteCircuitAuthorization # type: ignore
from ._models import ExpressRouteCircuitListResult # type: ignore
from ._models import ExpressRouteCircuitPeering # type: ignore
from ._models import ExpressRouteCircuitPeeringConfig # type: ignore
from ._models import ExpressRouteCircuitPeeringListResult # type: ignore
from ._models import ExpressRouteCircuitRoutesTable # type: ignore
from ._models import ExpressRouteCircuitRoutesTableSummary # type: ignore
from ._models import ExpressRouteCircuitServiceProviderProperties # type: ignore
from ._models import ExpressRouteCircuitSku # type: ignore
from ._models import ExpressRouteCircuitStats # type: ignore
from ._models import ExpressRouteCircuitsArpTableListResult # type: ignore
from ._models import ExpressRouteCircuitsRoutesTableListResult # type: ignore
from ._models import ExpressRouteCircuitsRoutesTableSummaryListResult # type: ignore
from ._models import ExpressRouteServiceProvider # type: ignore
from ._models import ExpressRouteServiceProviderBandwidthsOffered # type: ignore
from ._models import ExpressRouteServiceProviderListResult # type: ignore
from ._models import FlowLogInformation # type: ignore
from ._models import FlowLogStatusParameters # type: ignore
from ._models import FrontendIPConfiguration # type: ignore
from ._models import GatewayRoute # type: ignore
from ._models import GatewayRouteListResult # type: ignore
from ._models import IPAddressAvailabilityResult # type: ignore
from ._models import IPConfiguration # type: ignore
from ._models import InboundNatPool # type: ignore
from ._models import InboundNatRule # type: ignore
from ._models import IpsecPolicy # type: ignore
from ._models import Ipv6ExpressRouteCircuitPeeringConfig # type: ignore
from ._models import LoadBalancer # type: ignore
from ._models import LoadBalancerListResult # type: ignore
from ._models import LoadBalancingRule # type: ignore
from ._models import LocalNetworkGateway # type: ignore
from ._models import LocalNetworkGatewayListResult # type: ignore
from ._models import NetworkInterface # type: ignore
from ._models import NetworkInterfaceAssociation # type: ignore
from ._models import NetworkInterfaceDnsSettings # type: ignore
from ._models import NetworkInterfaceIPConfiguration # type: ignore
from ._models import NetworkInterfaceListResult # type: ignore
from ._models import NetworkSecurityGroup # type: ignore
from ._models import NetworkSecurityGroupListResult # type: ignore
from ._models import NetworkWatcher # type: ignore
from ._models import NetworkWatcherListResult # type: ignore
from ._models import NextHopParameters # type: ignore
from ._models import NextHopResult # type: ignore
from ._models import OutboundNatRule # type: ignore
from ._models import PacketCapture # type: ignore
from ._models import PacketCaptureFilter # type: ignore
from ._models import PacketCaptureListResult # type: ignore
from ._models import PacketCaptureParameters # type: ignore
from ._models import PacketCaptureQueryStatusResult # type: ignore
from ._models import PacketCaptureResult # type: ignore
from ._models import PacketCaptureResultProperties # type: ignore
from ._models import PacketCaptureStorageLocation # type: ignore
from ._models import PatchRouteFilter # type: ignore
from ._models import PatchRouteFilterRule # type: ignore
from ._models import Probe # type: ignore
from ._models import PublicIPAddress # type: ignore
from ._models import PublicIPAddressDnsSettings # type: ignore
from ._models import PublicIPAddressListResult # type: ignore
from ._models import QueryTroubleshootingParameters # type: ignore
from ._models import Resource # type: ignore
from ._models import ResourceNavigationLink # type: ignore
from ._models import RetentionPolicyParameters # type: ignore
from ._models import Route # type: ignore
from ._models import RouteFilter # type: ignore
from ._models import RouteFilterListResult # type: ignore
from ._models import RouteFilterRule # type: ignore
from ._models import RouteFilterRuleListResult # type: ignore
from ._models import RouteListResult # type: ignore
from ._models import RouteTable # type: ignore
from ._models import RouteTableListResult # type: ignore
from ._models import SecurityGroupNetworkInterface # type: ignore
from ._models import SecurityGroupViewParameters # type: ignore
from ._models import SecurityGroupViewResult # type: ignore
from ._models import SecurityRule # type: ignore
from ._models import SecurityRuleAssociations # type: ignore
from ._models import SecurityRuleListResult # type: ignore
from ._models import SubResource # type: ignore
from ._models import Subnet # type: ignore
from ._models import SubnetAssociation # type: ignore
from ._models import SubnetListResult # type: ignore
from ._models import Topology # type: ignore
from ._models import TopologyAssociation # type: ignore
from ._models import TopologyParameters # type: ignore
from ._models import TopologyResource # type: ignore
from ._models import TroubleshootingDetails # type: ignore
from ._models import TroubleshootingParameters # type: ignore
from ._models import TroubleshootingRecommendedActions # type: ignore
from ._models import TroubleshootingResult # type: ignore
from ._models import TunnelConnectionHealth # type: ignore
from ._models import Usage # type: ignore
from ._models import UsageName # type: ignore
from ._models import UsagesListResult # type: ignore
from ._models import VerificationIPFlowParameters # type: ignore
from ._models import VerificationIPFlowResult # type: ignore
from ._models import VirtualNetwork # type: ignore
from ._models import VirtualNetworkGateway # type: ignore
from ._models import VirtualNetworkGatewayConnection # type: ignore
from ._models import VirtualNetworkGatewayConnectionListResult # type: ignore
from ._models import VirtualNetworkGatewayIPConfiguration # type: ignore
from ._models import VirtualNetworkGatewayListResult # type: ignore
from ._models import VirtualNetworkGatewaySku # type: ignore
from ._models import VirtualNetworkListResult # type: ignore
from ._models import VirtualNetworkListUsageResult # type: ignore
from ._models import VirtualNetworkPeering # type: ignore
from ._models import VirtualNetworkPeeringListResult # type: ignore
from ._models import VirtualNetworkUsage # type: ignore
from ._models import VirtualNetworkUsageName # type: ignore
from ._models import VpnClientConfiguration # type: ignore
from ._models import VpnClientParameters # type: ignore
from ._models import VpnClientRevokedCertificate # type: ignore
from ._models import VpnClientRootCertificate # type: ignore
from ._network_management_client_enums import (
Access,
ApplicationGatewayBackendHealthServerHealth,
ApplicationGatewayCookieBasedAffinity,
ApplicationGatewayFirewallMode,
ApplicationGatewayOperationalState,
ApplicationGatewayProtocol,
ApplicationGatewayRequestRoutingRuleType,
ApplicationGatewaySkuName,
ApplicationGatewaySslProtocol,
ApplicationGatewayTier,
AssociationType,
AuthorizationUseStatus,
BgpPeerState,
ConnectionStatus,
DhGroup,
Direction,
EffectiveRouteSource,
EffectiveRouteState,
ExpressRouteCircuitPeeringAdvertisedPublicPrefixState,
ExpressRouteCircuitPeeringState,
ExpressRouteCircuitPeeringType,
ExpressRouteCircuitSkuFamily,
ExpressRouteCircuitSkuTier,
IPAllocationMethod,
IPVersion,
IkeEncryption,
IkeIntegrity,
IpsecEncryption,
IpsecIntegrity,
IssueType,
LoadDistribution,
NetworkOperationStatus,
NextHopType,
Origin,
PcError,
PcProtocol,
PcStatus,
PfsGroup,
ProbeProtocol,
ProcessorArchitecture,
Protocol,
ProvisioningState,
RouteFilterRuleType,
RouteNextHopType,
SecurityRuleAccess,
SecurityRuleDirection,
SecurityRuleProtocol,
ServiceProviderProvisioningState,
Severity,
TransportProtocol,
UsageUnit,
VirtualNetworkGatewayConnectionStatus,
VirtualNetworkGatewayConnectionType,
VirtualNetworkGatewaySkuName,
VirtualNetworkGatewaySkuTier,
VirtualNetworkGatewayType,
VirtualNetworkPeeringState,
VpnType,
)
__all__ = [
'AddressSpace',
'ApplicationGateway',
'ApplicationGatewayAuthenticationCertificate',
'ApplicationGatewayAvailableWafRuleSetsResult',
'ApplicationGatewayBackendAddress',
'ApplicationGatewayBackendAddressPool',
'ApplicationGatewayBackendHealth',
'ApplicationGatewayBackendHealthHttpSettings',
'ApplicationGatewayBackendHealthPool',
'ApplicationGatewayBackendHealthServer',
'ApplicationGatewayBackendHttpSettings',
'ApplicationGatewayConnectionDraining',
'ApplicationGatewayFirewallDisabledRuleGroup',
'ApplicationGatewayFirewallRule',
'ApplicationGatewayFirewallRuleGroup',
'ApplicationGatewayFirewallRuleSet',
'ApplicationGatewayFrontendIPConfiguration',
'ApplicationGatewayFrontendPort',
'ApplicationGatewayHttpListener',
'ApplicationGatewayIPConfiguration',
'ApplicationGatewayListResult',
'ApplicationGatewayPathRule',
'ApplicationGatewayProbe',
'ApplicationGatewayRequestRoutingRule',
'ApplicationGatewaySku',
'ApplicationGatewaySslCertificate',
'ApplicationGatewaySslPolicy',
'ApplicationGatewayUrlPathMap',
'ApplicationGatewayWebApplicationFirewallConfiguration',
'AuthorizationListResult',
'AzureAsyncOperationResult',
'BGPCommunity',
'BackendAddressPool',
'BgpPeerStatus',
'BgpPeerStatusListResult',
'BgpServiceCommunity',
'BgpServiceCommunityListResult',
'BgpSettings',
'ConnectionResetSharedKey',
'ConnectionSharedKey',
'ConnectivityDestination',
'ConnectivityHop',
'ConnectivityInformation',
'ConnectivityIssue',
'ConnectivityParameters',
'ConnectivitySource',
'DhcpOptions',
'DnsNameAvailabilityResult',
'EffectiveNetworkSecurityGroup',
'EffectiveNetworkSecurityGroupAssociation',
'EffectiveNetworkSecurityGroupListResult',
'EffectiveNetworkSecurityRule',
'EffectiveRoute',
'EffectiveRouteListResult',
'Error',
'ErrorDetails',
'ExpressRouteCircuit',
'ExpressRouteCircuitArpTable',
'ExpressRouteCircuitAuthorization',
'ExpressRouteCircuitListResult',
'ExpressRouteCircuitPeering',
'ExpressRouteCircuitPeeringConfig',
'ExpressRouteCircuitPeeringListResult',
'ExpressRouteCircuitRoutesTable',
'ExpressRouteCircuitRoutesTableSummary',
'ExpressRouteCircuitServiceProviderProperties',
'ExpressRouteCircuitSku',
'ExpressRouteCircuitStats',
'ExpressRouteCircuitsArpTableListResult',
'ExpressRouteCircuitsRoutesTableListResult',
'ExpressRouteCircuitsRoutesTableSummaryListResult',
'ExpressRouteServiceProvider',
'ExpressRouteServiceProviderBandwidthsOffered',
'ExpressRouteServiceProviderListResult',
'FlowLogInformation',
'FlowLogStatusParameters',
'FrontendIPConfiguration',
'GatewayRoute',
'GatewayRouteListResult',
'IPAddressAvailabilityResult',
'IPConfiguration',
'InboundNatPool',
'InboundNatRule',
'IpsecPolicy',
'Ipv6ExpressRouteCircuitPeeringConfig',
'LoadBalancer',
'LoadBalancerListResult',
'LoadBalancingRule',
'LocalNetworkGateway',
'LocalNetworkGatewayListResult',
'NetworkInterface',
'NetworkInterfaceAssociation',
'NetworkInterfaceDnsSettings',
'NetworkInterfaceIPConfiguration',
'NetworkInterfaceListResult',
'NetworkSecurityGroup',
'NetworkSecurityGroupListResult',
'NetworkWatcher',
'NetworkWatcherListResult',
'NextHopParameters',
'NextHopResult',
'OutboundNatRule',
'PacketCapture',
'PacketCaptureFilter',
'PacketCaptureListResult',
'PacketCaptureParameters',
'PacketCaptureQueryStatusResult',
'PacketCaptureResult',
'PacketCaptureResultProperties',
'PacketCaptureStorageLocation',
'PatchRouteFilter',
'PatchRouteFilterRule',
'Probe',
'PublicIPAddress',
'PublicIPAddressDnsSettings',
'PublicIPAddressListResult',
'QueryTroubleshootingParameters',
'Resource',
'ResourceNavigationLink',
'RetentionPolicyParameters',
'Route',
'RouteFilter',
'RouteFilterListResult',
'RouteFilterRule',
'RouteFilterRuleListResult',
'RouteListResult',
'RouteTable',
'RouteTableListResult',
'SecurityGroupNetworkInterface',
'SecurityGroupViewParameters',
'SecurityGroupViewResult',
'SecurityRule',
'SecurityRuleAssociations',
'SecurityRuleListResult',
'SubResource',
'Subnet',
'SubnetAssociation',
'SubnetListResult',
'Topology',
'TopologyAssociation',
'TopologyParameters',
'TopologyResource',
'TroubleshootingDetails',
'TroubleshootingParameters',
'TroubleshootingRecommendedActions',
'TroubleshootingResult',
'TunnelConnectionHealth',
'Usage',
'UsageName',
'UsagesListResult',
'VerificationIPFlowParameters',
'VerificationIPFlowResult',
'VirtualNetwork',
'VirtualNetworkGateway',
'VirtualNetworkGatewayConnection',
'VirtualNetworkGatewayConnectionListResult',
'VirtualNetworkGatewayIPConfiguration',
'VirtualNetworkGatewayListResult',
'VirtualNetworkGatewaySku',
'VirtualNetworkListResult',
'VirtualNetworkListUsageResult',
'VirtualNetworkPeering',
'VirtualNetworkPeeringListResult',
'VirtualNetworkUsage',
'VirtualNetworkUsageName',
'VpnClientConfiguration',
'VpnClientParameters',
'VpnClientRevokedCertificate',
'VpnClientRootCertificate',
'Access',
'ApplicationGatewayBackendHealthServerHealth',
'ApplicationGatewayCookieBasedAffinity',
'ApplicationGatewayFirewallMode',
'ApplicationGatewayOperationalState',
'ApplicationGatewayProtocol',
'ApplicationGatewayRequestRoutingRuleType',
'ApplicationGatewaySkuName',
'ApplicationGatewaySslProtocol',
'ApplicationGatewayTier',
'AssociationType',
'AuthorizationUseStatus',
'BgpPeerState',
'ConnectionStatus',
'DhGroup',
'Direction',
'EffectiveRouteSource',
'EffectiveRouteState',
'ExpressRouteCircuitPeeringAdvertisedPublicPrefixState',
'ExpressRouteCircuitPeeringState',
'ExpressRouteCircuitPeeringType',
'ExpressRouteCircuitSkuFamily',
'ExpressRouteCircuitSkuTier',
'IPAllocationMethod',
'IPVersion',
'IkeEncryption',
'IkeIntegrity',
'IpsecEncryption',
'IpsecIntegrity',
'IssueType',
'LoadDistribution',
'NetworkOperationStatus',
'NextHopType',
'Origin',
'PcError',
'PcProtocol',
'PcStatus',
'PfsGroup',
'ProbeProtocol',
'ProcessorArchitecture',
'Protocol',
'ProvisioningState',
'RouteFilterRuleType',
'RouteNextHopType',
'SecurityRuleAccess',
'SecurityRuleDirection',
'SecurityRuleProtocol',
'ServiceProviderProvisioningState',
'Severity',
'TransportProtocol',
'UsageUnit',
'VirtualNetworkGatewayConnectionStatus',
'VirtualNetworkGatewayConnectionType',
'VirtualNetworkGatewaySkuName',
'VirtualNetworkGatewaySkuTier',
'VirtualNetworkGatewayType',
'VirtualNetworkPeeringState',
'VpnType',
]
| |
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from cStringIO import StringIO
from dateutil.parser import parse
from functools import partial
from gzip import GzipFile
import json
import logging
import math
from multiprocessing import cpu_count, Pool
import os
import tempfile
import time
import sqlite3
import boto3
from botocore.client import Config
log = logging.getLogger('c7n_traildb')
def dump(o):
    """Serialize *o* to a JSON string (inverse of :func:`load`)."""
    return json.dumps(o)
def load(s):
    """Deserialize a JSON string back to Python objects (inverse of :func:`dump`)."""
    return json.loads(s)
def chunks(iterable, size=50):
    """Break an iterable into lists of at most *size* items.

    The final chunk may be shorter; an empty iterable yields nothing.
    """
    batch = []
    for item in iterable:
        batch.append(item)
        if len(batch) == size:
            yield batch
            batch = []
    if batch:
        yield batch
def process_trail_set(
        object_set, map_records, reduce_results=None, trail_bucket=None):
    """Fetch, decompress, and map each trail object, folding the results.

    Each S3 key in *object_set* is downloaded from *trail_bucket*,
    gunzipped, and its 'Records' list passed through *map_records*.
    When *reduce_results* is given, per-object outputs are folded into a
    single accumulated value, which is returned (None otherwise).
    """
    client = boto3.Session().client(
        's3', config=Config(signature_version='s3v4'))
    accumulated = None
    for obj in object_set:
        raw = client.get_object(
            Key=obj['Key'], Bucket=trail_bucket)['Body']
        payload = json.load(GzipFile(fileobj=StringIO(raw.read())))
        mapped = map_records(payload['Records'])
        if reduce_results:
            accumulated = reduce_results(mapped, accumulated)
    return accumulated
class TrailDB(object):
    """Thin sqlite3 wrapper holding flattened cloudtrail event rows."""

    def __init__(self, path):
        self.path = path
        self.conn = sqlite3.connect(self.path)
        self.cursor = self.conn.cursor()
        # Large payload columns (request, response, user identity JSON)
        # are deliberately omitted to keep the database small.
        self.cursor.execute('''
           create table if not exists events (
              event_date datetime,
              event_name varchar(128),
              event_source varchar(128),
              user_agent varchar(128),
              request_id varchar(32),
              client_ip varchar(32),
              user_id varchar(128),
              error_code varchar(256),
              error text
           )''')

    def insert(self, records):
        """Bulk-insert an iterable of 9-tuples matching the events schema."""
        self.cursor.executemany(
            "insert into events values (?, ?, ?, ?, ?, ?, ?, ?, ?)",
            records)

    def flush(self):
        """Commit any pending inserts."""
        self.conn.commit()
def reduce_records(x, y):
    """Merge two record lists in place, tolerating None on either side.

    Returns *y* extended with *x*; if either argument is None the other
    is returned unchanged (None when both are).
    """
    if x is None:
        return y
    if y is None:
        return x
    y.extend(x)
    return y
# STOP = 42
#
# def store_records(output, q):
# db = TrailDB(output)
# while True:
# results = q.get()
# if results == STOP:
# return
# for r in results:
# for fpath in r:
# with open(fpath) as fh:
# db.insert(load(fh.read()))
# os.remove(fpath)
# db.flush()
def process_record_set(object_processor, q):
    """Wrap *object_processor* so truthy results are also pushed onto *q*.

    Returns the wrapping callable.  The wrapper forwards all arguments to
    *object_processor*, enqueues the result on *q* when it is truthy, and
    returns the result either way.

    Bug fix: the original defined the wrapper but never returned it, so
    this function always returned None and the wrapper was unreachable.
    """
    def f(*args, **kw):
        r = object_processor(*args, **kw)
        if r:
            q.put(r)
        return r
    return f
def process_records(records,
                    uid_filter=None,
                    event_filter=None,
                    service_filter=None,
                    not_service_filter=None,
                    data_dir=None):
    """Flatten raw cloudtrail records into event tuples for TrailDB.

    Each surviving record becomes a 9-tuple matching the events table
    schema: (time, name, source, agent, request id, source ip, uid,
    error code, error message).

    Filters: *uid_filter* keeps records whose derived uid contains the
    substring (case-insensitive on the uid side); *event_filter* and
    *service_filter* require exact matches; *not_service_filter* drops a
    single event source.

    When *data_dir* is given, matching tuples are spooled to a temp file
    there (to bound memory in worker processes) and a single-element list
    with the file path is returned — or None when nothing matched.
    Otherwise the list of tuples is returned directly.
    """
    user_records = []
    for r in records:
        if not_service_filter and r['eventSource'] == not_service_filter:
            continue
        utype = r['userIdentity'].get('type', None)
        if utype == 'Root':
            uid = 'root'
        elif utype == 'SAMLUser':
            uid = r['userIdentity']['userName']
        # Bug fix: 'invokedBy' is not present on every record; a direct
        # r['userIdentity']['invokedBy'] raised KeyError for untyped
        # identities without it.  Use .get so such records fall through
        # to the arn-based default.
        elif utype is None and r['userIdentity'].get('invokedBy') == 'AWS Internal':
            uid = r['userIdentity']['invokedBy']
        else:
            uid = r['userIdentity'].get('arn', '')
        if uid_filter and uid_filter not in uid.lower():
            continue
        elif event_filter and not r['eventName'] == event_filter:
            continue
        elif service_filter and not r['eventSource'] == service_filter:
            continue
        user_records.append((
            r['eventTime'],
            r['eventName'],
            r['eventSource'],
            r.get('userAgent', ''),
            r.get('requestID', ''),
            r.get('sourceIPAddress', ''),
            uid,
            # TODO make this optional, for now omit for size
            # json.dumps(r['requestParameters']),
            # json.dumps(r['responseElements']),
            # json.dumps(r['userIdentity']),
            r.get('errorCode', None),
            r.get('errorMessage', None)
        ))
    if data_dir:
        if not user_records:
            return
        # Spool to temporary files to get out of mem
        fh = tempfile.NamedTemporaryFile(dir=data_dir, delete=False)
        fh.write(dump(user_records))
        fh.flush()
        fh.close()
        return [fh.name]
    return user_records
def process_bucket(
        bucket_name, prefix,
        output=None, uid_filter=None, event_filter=None,
        service_filter=None, not_service_filter=None, data_dir=None):
    """Index every cloudtrail log object under *prefix* into a TrailDB.

    Pages through the bucket listing, fans chunks of keys out to a
    process pool (each worker downloads/parses via process_trail_set ->
    process_records), then loads the spooled result files into the
    sqlite database at *output*.

    NOTE(review): the result-loading loop below assumes workers return
    lists of temp-file paths, which requires *data_dir* to be set —
    confirm callers always pass it.
    """
    s3 = boto3.Session().client(
        's3', config=Config(signature_version='s3v4'))
    paginator = s3.get_paginator('list_objects')
    # PyPy has some memory leaks.... :-(
    pool = Pool(maxtasksperchild=10)
    t = time.time()
    object_count = object_size = 0
    log.info("Processing:%d cloud-trail %s" % (
        cpu_count(),
        prefix))
    # Bind all filter options once so workers receive a single callable.
    record_processor = partial(
        process_records,
        uid_filter=uid_filter,
        event_filter=event_filter,
        service_filter=service_filter,
        not_service_filter=not_service_filter,
        data_dir=data_dir)
    object_processor = partial(
        process_trail_set,
        map_records=record_processor,
        reduce_results=reduce_records,
        trail_bucket=bucket_name)
    db = TrailDB(output)
    # Split each listing page (up to 1000 keys) evenly across the cores.
    # NOTE(review): bsize is a float; chunks() happens to tolerate that,
    # but an int() conversion would be cleaner — confirm.
    bsize = math.ceil(1000 / float(cpu_count()))
    for page in paginator.paginate(Bucket=bucket_name, Prefix=prefix):
        objects = page.get('Contents', ())
        object_count += len(objects)
        object_size += sum([o['Size'] for o in objects])
        pt = time.time()
        if pool:
            results = pool.map(object_processor, chunks(objects, bsize))
        else:
            results = map(object_processor, chunks(objects, bsize))
        st = time.time()
        log.info("Loaded page time:%0.2fs", st - pt)
        # Each worker result is a list of temp-file paths holding JSON
        # encoded event tuples; load them into sqlite and clean up.
        for r in results:
            for fpath in r:
                with open(fpath) as fh:
                    db.insert(load(fh.read()))
                os.remove(fpath)
        db.flush()
        # l/t track the wall time of the full page cycle for the log line.
        l = t
        t = time.time()
        log.info("Stored page time:%0.2fs", t - st)
        log.info(
            "Processed paged time:%0.2f size:%s count:%s" % (
                t - l, object_size, object_count))
        if objects:
            log.info('Last Page Key: %s', objects[-1]['Key'])
def get_bucket_path(options):
    """Build the S3 key prefix for a cloudtrail log location.

    Combines the standard AWSLogs layout with the account/region from
    *options*, an optional bucket prefix, and an optional date filter
    (``--month`` takes precedence over ``--day`` since it is applied
    last).  Returns the prefix string.

    Bug fix: date_prefix was previously left unbound when neither --day
    nor --month was supplied, raising NameError; it now defaults to None.
    """
    prefix = "AWSLogs/%(account)s/CloudTrail/%(region)s/" % {
        'account': options.account, 'region': options.region}
    if options.prefix:
        prefix = "%s/%s" % (options.prefix.strip('/'), prefix)
    date_prefix = None
    if options.day:
        date_prefix = parse(options.day).strftime("%Y/%m/%d/")
    if options.month:
        date_prefix = parse(options.month).strftime("%Y/%m/")
    if date_prefix:
        prefix += date_prefix
    return prefix
def setup_parser():
    """Construct the command line interface for the traildb indexer."""
    # (flag, extra keyword arguments) in the order they appear in --help.
    option_specs = (
        ("--bucket", {"required": True}),
        ("--prefix", {"default": ""}),
        ("--account", {"required": True}),
        ("--user", {}),
        ("--event", {}),
        ("--source", {}),
        ("--not-source", {}),
        ("--day", {}),
        ("--month", {}),
        ("--tmpdir", {"default": "/tmp/traildb"}),
        ("--region", {"default": "us-east-1"}),
        ("--output", {"default": "results.db"}),
    )
    parser = argparse.ArgumentParser()
    for flag, extra in option_specs:
        parser.add_argument(flag, **extra)
    return parser
def main():
    """Entry point: parse arguments, prepare the spool dir, index the bucket."""
    logging.basicConfig(level=logging.DEBUG)
    # botocore is extremely chatty at DEBUG; keep it at WARNING.
    logging.getLogger('botocore').setLevel(logging.WARNING)
    parser = setup_parser()
    options = parser.parse_args()
    if options.tmpdir and not os.path.exists(options.tmpdir):
        os.makedirs(options.tmpdir)
    prefix = get_bucket_path(options)
    process_bucket(
        options.bucket,
        prefix,
        options.output,
        options.user,
        options.event,
        options.source,
        options.not_source,
        options.tmpdir)


if __name__ == '__main__':
    main()
| |
# Copyright (c) 2013, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""SMBIOS/DMI module."""
import bits
import bitfields
import redirect
import struct
import uuid
import unpack
import ttypager
import sys
class SMBIOS(unpack.Struct):
    """Top-level decoder for the SMBIOS entry point and structure table.

    __new__ locates the entry point (EFI configuration table or legacy
    memory scan) and returns None when no table exists.
    """
    def __new__(cls):
        if sys.platform == "BITS-EFI":
            import efi
            sm_ptr = efi.system_table.ConfigurationTableDict.get(efi.SMBIOS_TABLE_GUID)
        else:
            # Legacy BIOS: scan 0xF0000-0xFFFFF for the "_SM_" anchor on
            # a 16-byte boundary, then verify the entry point checksum.
            mem = bits.memory(0xF0000, 0x10000)
            signature = struct.unpack("<I", "_SM_")[0]
            for offset in range(0, len(mem), 16):
                if struct.unpack_from("I", mem, offset)[0] == signature:
                    entry_point_length = struct.unpack_from("B", mem, offset + 5)[0]
                    csum = sum(map(ord, mem[offset:offset + entry_point_length])) & 0xff
                    if csum == 0:
                        sm_ptr = bits.memory_addr(mem) + offset
                        break
            else:
                # Scanned the whole region without finding a valid anchor.
                return None
        if not sm_ptr:
            return None
        sm = super(SMBIOS, cls).__new__(cls)
        # 0x1f bytes covers the full 32-bit entry point structure.
        sm._header_memory = bits.memory(sm_ptr, 0x1f)
        return sm
    def __init__(self):
        super(SMBIOS, self).__init__()
        u = unpack.Unpackable(self._header_memory)
        self.add_field('header', Header(u))
        # The header gives the address/length of the structure table;
        # decode every structure found there.
        self._structure_memory = bits.memory(self.header.structure_table_address, self.header.structure_table_length)
        u = unpack.Unpackable(self._structure_memory)
        self.add_field('structures', unpack.unpack_all(u, _smbios_structures, self), unpack.format_each("\n\n{!r}"))
class Header(unpack.Struct):
    """SMBIOS 32-bit entry point structure (the "_SM_" table header)."""

    def __new__(cls, u):
        return super(Header, cls).__new__(cls)

    def __init__(self, u):
        super(Header, self).__init__()
        self.raw_data = u.unpack_rest()
        u = unpack.Unpackable(self.raw_data)
        # Field name/format pairs, in the exact on-disk order.
        field_layout = (
            ('anchor_string', "4s"),
            ('checksum', "B"),
            ('length', "B"),
            ('major_version', "B"),
            ('minor_version', "B"),
            ('max_structure_size', "<H"),
            ('entry_point_revision', "B"),
            ('formatted_area', "5s"),
            ('intermediate_anchor_string', "5s"),
            ('intermediate_checksum', "B"),
            ('structure_table_length', "<H"),
            ('structure_table_address', "<I"),
            ('number_structures', "<H"),
            ('bcd_revision', "B"),
        )
        for field_name, fmt in field_layout:
            self.add_field(field_name, u.unpack_one(fmt))
        if not u.at_end():
            self.add_field('data', u.unpack_rest())
class SmbiosBaseStructure(unpack.Struct):
    """Common base for SMBIOS structures: header fields, raw bytes, strings."""
    def __new__(cls, u, sm):
        # Peek at the structure type byte; subclasses declare which type
        # they decode and decline (return None) anything else, letting
        # unpack_all try the next decoder in _smbios_structures.
        t = u.unpack_peek_one("B")
        if cls.smbios_structure_type is not None and t != cls.smbios_structure_type:
            return None
        return super(SmbiosBaseStructure, cls).__new__(cls)
    def __init__(self, u, sm):
        super(SmbiosBaseStructure, self).__init__()
        self.start_offset = u.offset
        # The formatted-area length lives at byte offset 1 of the structure.
        length = u.unpack_peek_one("<xB")
        self.raw_data = u.unpack_raw(length)
        self.u = unpack.Unpackable(self.raw_data)
        self.strings_offset = u.offset
        # The string set follows the formatted area: NUL-terminated
        # strings, terminated by an empty string (double NUL overall).
        def unpack_string():
            return "".join(iter(lambda: u.unpack_one("c"), "\x00"))
        strings = list(iter(unpack_string, ""))
        if not strings:
            # No strings at all: skip the second NUL of the terminator.
            u.skip(1)
        self.strings_length = u.offset - self.strings_offset
        self.raw_strings = str(bits.memory(sm.header.structure_table_address + self.strings_offset, self.strings_length))
        if len(strings):
            self.strings = strings
        self.add_field('type', self.u.unpack_one("B"))
        self.add_field('length', self.u.unpack_one("B"))
        self.add_field('handle', self.u.unpack_one("<H"))
    def fini(self):
        """Capture any undecoded remainder and release the unpacker."""
        if not self.u.at_end():
            self.add_field('data', self.u.unpack_rest())
        del self.u
    def fmtstr(self, i):
        """Format the specified index and the associated string"""
        return "{} '{}'".format(i, self.getstr(i))
    def getstr(self, i):
        """Get the string associated with the given index"""
        # SMBIOS string indices are 1-based; 0 means "no string".
        if i == 0:
            return "(none)"
        if not hasattr(self, "strings"):
            return "(error: structure has no strings)"
        if i > len(self.strings):
            return "(error: string index out of range)"
        return self.strings[i - 1]
class BIOSInformation(SmbiosBaseStructure):
    """SMBIOS type 0: BIOS vendor, version, release, and characteristics."""
    smbios_structure_type = 0
    def __init__(self, u, sm):
        super(BIOSInformation, self).__init__(u, sm)
        u = self.u
        try:
            self.add_field('vendor', u.unpack_one("B"), self.fmtstr)
            self.add_field('version', u.unpack_one("B"), self.fmtstr)
            self.add_field('starting_address_segment', u.unpack_one("<H"))
            self.add_field('release_date', u.unpack_one("B"), self.fmtstr)
            self.add_field('rom_size', u.unpack_one("B"))
            self.add_field('characteristics', u.unpack_one("<Q"))
            minor_version_str = str(sm.header.minor_version) # 34 is .34, 4 is .4, 41 is .41; compare ASCIIbetically to compare initial digits rather than numeric value
            if (sm.header.major_version, minor_version_str) >= (2,"4"):
                # SMBIOS 2.4+ defines exactly two extension bytes.
                characteristic_bytes = 2
            else:
                # Pre-2.4: extension bytes fill the rest of the formatted area.
                characteristic_bytes = self.length - 0x12
            self.add_field('characteristics_extensions', [u.unpack_one("B") for b in range(characteristic_bytes)])
            if (sm.header.major_version, minor_version_str) >= (2,"4"):
                self.add_field('major_release', u.unpack_one("B"))
                self.add_field('minor_release', u.unpack_one("B"))
                self.add_field('ec_major_release', u.unpack_one("B"))
                self.add_field('ec_minor_release', u.unpack_one("B"))
        except:
            # NOTE(review): bare except keeps decoding best-effort, but
            # swallows everything including KeyboardInterrupt.
            self.decode_failure = True
            print "Error parsing BIOSInformation"
            import traceback
            traceback.print_exc()
        self.fini()
class SystemInformation(SmbiosBaseStructure):
    """SMBIOS type 1: system manufacturer, product, UUID, wake-up type."""
    smbios_structure_type = 1
    def __init__(self, u, sm):
        super(SystemInformation, self).__init__(u, sm)
        u = self.u
        try:
            self.add_field('manufacturer', u.unpack_one("B"), self.fmtstr)
            self.add_field('product_name', u.unpack_one("B"), self.fmtstr)
            self.add_field('version', u.unpack_one("B"), self.fmtstr)
            self.add_field('serial_number', u.unpack_one("B"), self.fmtstr)
            # Fields below only exist in longer (newer-spec) structures;
            # gate on the declared structure length.
            if self.length > 0x8:
                self.add_field('uuid', uuid.UUID(bytes_le=u.unpack_one("16s")))
                wakeup_types = {
                    0: 'Reserved',
                    1: 'Other',
                    2: 'Unknown',
                    3: 'APM Timer',
                    4: 'Modem Ring',
                    5: 'LAN Remote',
                    6: 'Power Switch',
                    7: 'PCI PME#',
                    8: 'AC Power Restored'
                }
                self.add_field('wakeup_type', u.unpack_one("B"), unpack.format_table("{}", wakeup_types))
            if self.length > 0x19:
                self.add_field('sku_number', u.unpack_one("B"), self.fmtstr)
                self.add_field('family', u.unpack_one("B"), self.fmtstr)
        except:
            self.decode_failure = True
            print "Error parsing SystemInformation"
            import traceback
            traceback.print_exc()
        self.fini()
# Board type codes shared by BaseboardInformation (type 2) and
# SystemEnclosureContainedElement decoding.
_board_types = {
    1: 'Unknown',
    2: 'Other',
    3: 'Server Blade',
    4: 'Connectivity Switch',
    5: 'System Management Module',
    6: 'Processor Module',
    7: 'I/O Module',
    8: 'Memory Module',
    9: 'Daughter Board',
    0xA: 'Motherboard',
    0xB: 'Processor/Memory Module',
    0xC: 'Processor/IO Module',
    0xD: 'Interconnect Board'
}
class BaseboardInformation(SmbiosBaseStructure):
    """SMBIOS type 2: baseboard (module) manufacturer, features, location."""
    smbios_structure_type = 2
    def __init__(self, u, sm):
        super(BaseboardInformation, self).__init__(u, sm)
        u = self.u
        try:
            self.add_field('manufacturer', u.unpack_one("B"), self.fmtstr)
            self.add_field('product', u.unpack_one("B"), self.fmtstr)
            self.add_field('version', u.unpack_one("B"), self.fmtstr)
            self.add_field('serial_number', u.unpack_one("B"), self.fmtstr)
            # Optional fields gated on the declared structure length.
            if self.length > 0x8:
                self.add_field('asset_tag', u.unpack_one("B"), self.fmtstr)
            if self.length > 0x9:
                self.add_field('feature_flags', u.unpack_one("B"))
                # Decode the individual feature bits for readability.
                self.add_field('hosting_board', bool(bitfields.getbits(self.feature_flags, 0)), "feature_flags[0]={}")
                self.add_field('requires_daughter_card', bool(bitfields.getbits(self.feature_flags, 1)), "feature_flags[1]={}")
                self.add_field('removable', bool(bitfields.getbits(self.feature_flags, 2)), "feature_flags[2]={}")
                self.add_field('replaceable', bool(bitfields.getbits(self.feature_flags, 3)), "feature_flags[3]={}")
                self.add_field('hot_swappable', bool(bitfields.getbits(self.feature_flags, 4)), "feature_flags[4]={}")
            if self.length > 0xA:
                self.add_field('location', u.unpack_one("B"), self.fmtstr)
            if self.length > 0xB:
                self.add_field('chassis_handle', u.unpack_one("<H"))
            if self.length > 0xD:
                self.add_field('board_type', u.unpack_one("B"), unpack.format_table("{}", _board_types))
            if self.length > 0xE:
                self.add_field('handle_count', u.unpack_one("B"))
                if self.handle_count > 0:
                    self.add_field('contained_object_handles', tuple(u.unpack_one("<H") for i in range(self.handle_count)))
        except:
            self.decode_failure = True
            print "Error parsing BaseboardInformation"
            import traceback
            traceback.print_exc()
        self.fini()
class SystemEnclosure(SmbiosBaseStructure):
    """SMBIOS type 3: chassis/enclosure type, states, and contained elements."""
    smbios_structure_type = 3
    def __init__(self, u, sm):
        super(SystemEnclosure, self).__init__(u, sm)
        u = self.u
        try:
            self.add_field('manufacturer', u.unpack_one("B"), self.fmtstr)
            # Bit 7 of the type byte is the chassis-lock flag; bits 6:0
            # encode the enclosure type itself.
            self.add_field('enumerated_type', u.unpack_one("B"))
            self.add_field('chassis_lock_present', bool(bitfields.getbits(self.enumerated_type, 7)), "type[7]={}")
            board_types = {
                0x01: 'Other',
                0x02: 'Unknown',
                0x03: 'Desktop',
                0x04: 'Low Profile Desktop',
                0x05: 'Pizza Box',
                0x06: 'Mini Tower',
                0x07: 'Tower',
                0x08: 'Portable',
                0x09: 'Laptop',
                0x0A: 'Notebook',
                0x0B: 'Hand Held',
                0x0C: 'Docking Station',
                0x0D: 'All in One',
                0x0E: 'Sub Notebook',
                0x0F: 'Space-saving',
                0x10: 'Lunch Box',
                0x11: 'Main Server Chassis',
                0x12: 'Expansion Chassis',
                0x13: 'SubChassis',
                0x14: 'Bus Expansion Chassis',
                0x15: 'Peripheral Chassis',
                0x16: 'RAID Chassis',
                0x17: 'Rack Mount Chassis',
                0x18: 'Sealed-case PC',
                0x19: 'Multi-system chassis W',
                0x1A: 'Compact PCI',
                0x1B: 'Advanced TCA',
                0x1C: 'Blade',
                0x1D: 'Blade Enclosure',
            }
            self.add_field('system_enclosure_type', bitfields.getbits(self.enumerated_type, 6, 0), unpack.format_table("enumerated_type[6:0]={}", board_types))
            self.add_field('version', u.unpack_one("B"), self.fmtstr)
            self.add_field('serial_number', u.unpack_one("B"), self.fmtstr)
            self.add_field('asset_tag', u.unpack_one("B"), self.fmtstr)
            # NOTE(review): minor_version_str is computed but unused in
            # this method — appears to be leftover from a version check.
            minor_version_str = str(sm.header.minor_version) # 34 is .34, 4 is .4, 41 is .41; compare ASCIIbetically to compare initial digits rather than numeric value
            if self.length > 9:
                chassis_states = {
                    0x01: 'Other',
                    0x02: 'Unknown',
                    0x03: 'Safe',
                    0x04: 'Warning',
                    0x05: 'Critical',
                    0x06: 'Non-recoverable',
                }
                self.add_field('bootup_state', u.unpack_one("B"), unpack.format_table("{}", chassis_states))
                self.add_field('power_supply_state', u.unpack_one("B"), unpack.format_table("{}", chassis_states))
                self.add_field('thermal_state', u.unpack_one("B"), unpack.format_table("{}", chassis_states))
                security_states = {
                    0x01: 'Other',
                    0x02: 'Unknown',
                    0x03: 'None',
                    0x04: 'External interface locked out',
                    0x05: 'External interface enabled',
                }
                self.add_field('security_status', u.unpack_one("B"), unpack.format_table("{}", security_states))
            if self.length > 0xd:
                self.add_field('oem_defined', u.unpack_one("<I"))
            if self.length > 0x11:
                self.add_field('height', u.unpack_one("B"))
                self.add_field('num_power_cords', u.unpack_one("B"))
                self.add_field('contained_element_count', u.unpack_one("B"))
                self.add_field('contained_element_length', u.unpack_one("B"))
            if getattr(self, 'contained_element_count', 0):
                self.add_field('contained_elements', tuple(SystemEnclosureContainedElement(u, self.contained_element_length) for i in range(self.contained_element_count)))
            # sku_number follows the variable-size contained-elements array.
            if self.length > (0x15 + (getattr(self, 'contained_element_count', 0) * getattr(self, 'contained_element_length', 0))):
                self.add_field('sku_number', u.unpack_one("B"), self.fmtstr)
        except:
            self.decode_failure = True
            print "Error parsing SystemEnclosure"
            import traceback
            traceback.print_exc()
        self.fini()
class SystemEnclosureContainedElement(unpack.Struct):
    """One contained-element record within a SystemEnclosure (type 3)."""
    def __init__(self, u, length):
        super(SystemEnclosureContainedElement, self).__init__()
        self.start_offset = u.offset
        # Each element occupies the fixed byte count declared by the
        # enclosure's contained_element_length field.
        self.raw_data = u.unpack_raw(length)
        self.u = unpack.Unpackable(self.raw_data)
        u = self.u
        self.add_field('contained_element_type', u.unpack_one("B"))
        type_selections = {
            0: 'SMBIOS baseboard type enumeration',
            1: 'SMBIOS structure type enumeration',
        }
        # Bug fix: these bitfields decode contained_element_type; the
        # original referenced self.type, which does not exist until the
        # 'type' field is added below (AttributeError at decode time).
        self.add_field('type_select', bitfields.getbits(self.contained_element_type, 7), unpack.format_table("contained_element_type[7]={}", type_selections))
        self.add_field('type', bitfields.getbits(self.contained_element_type, 6, 0))
        if self.type_select == 0:
            self.add_field('smbios_board_type', self.type, unpack.format_table("{}", _board_types))
        else:
            self.add_field('smbios_structure_type', self.type)
        self.add_field('minimum', u.unpack_one("B"))
        self.add_field('maximum', u.unpack_one("B"))
        if not u.at_end():
            self.add_field('data', u.unpack_rest())
        del self.u
class ProcessorInformation(SmbiosBaseStructure):
    """SMBIOS type 4: processor socket, family, speeds, caches, and cores."""
    smbios_structure_type = 4
    def __init__(self, u, sm):
        super(ProcessorInformation, self).__init__(u, sm)
        u = self.u
        try:
            self.add_field('socket_designation', u.unpack_one("B"), self.fmtstr)
            processor_types = {
                0x01: 'Other',
                0x02: 'Unknown',
                0x03: 'Central Processor',
                0x04: 'Math Processor',
                0x05: 'DSP Processor',
                0x06: 'Video Processor',
            }
            self.add_field('processor_type', u.unpack_one("B"), unpack.format_table("{}", processor_types))
            self.add_field('processor_family', u.unpack_one("B"))
            self.add_field('processor_manufacturer', u.unpack_one("B"), self.fmtstr)
            self.add_field('processor_id', u.unpack_one("<Q"))
            self.add_field('processor_version', u.unpack_one("B"), self.fmtstr)
            self.add_field('voltage', u.unpack_one("B"))
            self.add_field('external_clock', u.unpack_one("<H"))
            self.add_field('max_speed', u.unpack_one("<H"))
            self.add_field('current_speed', u.unpack_one("<H"))
            self.add_field('status', u.unpack_one("B"))
            processor_upgrades = {
                0x01: 'Other',
                0x02: 'Unknown',
                0x03: 'Daughter Board',
                0x04: 'ZIF Socket',
                0x05: 'Replaceable Piggy Back',
                0x06: 'None',
                0x07: 'LIF Socket',
                0x08: 'Slot 1',
                0x09: 'Slot 2',
                0x0A: '370-pin socket',
                0x0B: 'Slot A',
                0x0C: 'Slot M',
                0x0D: 'Socket 423',
                0x0E: 'Socket A (Socket 462)',
                0x0F: 'Socket 478',
                0x10: 'Socket 754',
                0x11: 'Socket 940',
                0x12: 'Socket 939',
                0x13: 'Socket mPGA604',
                0x14: 'Socket LGA771',
                0x15: 'Socket LGA775',
                0x16: 'Socket S1',
                0x17: 'Socket AM2',
                0x18: 'Socket F (1207)',
                0x19: 'Socket LGA1366',
                0x1A: 'Socket G34',
                0x1B: 'Socket AM3',
                0x1C: 'Socket C32',
                0x1D: 'Socket LGA1156',
                0x1E: 'Socket LGA1567',
                0x1F: 'Socket PGA988A',
                0x20: 'Socket BGA1288',
                0x21: 'Socket rPGA988B',
                0x22: 'Socket BGA1023',
                0x23: 'Socket BGA1224',
                0x24: 'Socket BGA1155',
                0x25: 'Socket LGA1356',
                0x26: 'Socket LGA2011',
                0x27: 'Socket FS1',
                0x28: 'Socket FS2',
                0x29: 'Socket FM1',
                0x2A: 'Socket FM2',
            }
            self.add_field('processor_upgrade', u.unpack_one("B"), unpack.format_table("{}", processor_upgrades))
            # Optional fields gated on the declared structure length
            # (longer structures correspond to newer spec revisions).
            if self.length > 0x1A:
                self.add_field('l1_cache_handle', u.unpack_one("<H"))
                self.add_field('l2_cache_handle', u.unpack_one("<H"))
                self.add_field('l3_cache_handle', u.unpack_one("<H"))
            if self.length > 0x20:
                self.add_field('serial_number', u.unpack_one("B"), self.fmtstr)
                self.add_field('asset_tag', u.unpack_one("B"), self.fmtstr)
                self.add_field('part_number', u.unpack_one("B"), self.fmtstr)
            if self.length > 0x24:
                self.add_field('core_count', u.unpack_one("B"))
                self.add_field('core_enabled', u.unpack_one("B"))
                self.add_field('thread_count', u.unpack_one("B"))
                self.add_field('processor_characteristics', u.unpack_one("<H"))
            if self.length > 0x28:
                self.add_field('processor_family_2', u.unpack_one("<H"))
        except:
            self.decode_failure = True
            print "Error parsing Processor Information"
            import traceback
            traceback.print_exc()
        self.fini()
class PortConnectorInfo(SmbiosBaseStructure):
    """SMBIOS type 8: internal/external port connector designations and types."""
    smbios_structure_type = 8
    def __init__(self, u, sm):
        super(PortConnectorInfo, self).__init__(u, sm)
        u = self.u
        try:
            self.add_field('internal_reference_designator', u.unpack_one("B"), self.fmtstr)
            # Shared by both the internal and external connector fields.
            connector_types = {
                0x00: 'None',
                0x01: 'Centronics',
                0x02: 'Mini Centronics',
                0x03: 'Proprietary',
                0x04: 'DB-25 pin male',
                0x05: 'DB-25 pin female',
                0x06: 'DB-15 pin male',
                0x07: 'DB-15 pin female',
                0x08: 'DB-9 pin male',
                0x09: 'DB-9 pin female',
                0x0A: 'RJ-11',
                0x0B: 'RJ-45',
                0x0C: '50-pin MiniSCSI',
                0x0D: 'Mini-DIN',
                0x0E: 'Micro-DIN',
                0x0F: 'PS/2',
                0x10: 'Infrared',
                0x11: 'HP-HIL',
                0x12: 'Access Bus (USB)',
                0x13: 'SSA SCSI',
                0x14: 'Circular DIN-8 male',
                0x15: 'Circular DIN-8 female',
                0x16: 'On Board IDE',
                0x17: 'On Board Floppy',
                0x18: '9-pin Dual Inline (pin 10 cut)',
                0x19: '25-pin Dual Inline (pin 26 cut)',
                0x1A: '50-pin Dual Inline',
                0x1B: '68-pin Dual Inline',
                0x1C: 'On Board Sound Input from CD-ROM',
                0x1D: 'Mini-Centronics Type-14',
                0x1E: 'Mini-Centronics Type-26',
                0x1F: 'Mini-jack (headphones)',
                0x20: 'BNC',
                0x21: '1394',
                0x22: 'SAS/SATA Plug Receptacle',
                0xA0: 'PC-98',
                0xA1: 'PC-98Hireso',
                0xA2: 'PC-H98',
                0xA3: 'PC-98Note',
                0xA4: 'PC-98Full',
                0xFF: 'Other',
            }
            self.add_field('internal_connector_type', u.unpack_one("B"), unpack.format_table("{}", connector_types))
            self.add_field('external_reference_designator', u.unpack_one("B"), self.fmtstr)
            self.add_field('external_connector_type', u.unpack_one("B"), unpack.format_table("{}", connector_types))
            port_types = {
                0x00: 'None',
                0x01: 'Parallel Port XT/AT Compatible',
                0x02: 'Parallel Port PS/2',
                0x03: 'Parallel Port ECP',
                0x04: 'Parallel Port EPP',
                0x05: 'Parallel Port ECP/EPP',
                0x06: 'Serial Port XT/AT Compatible',
                0x07: 'Serial Port 16450 Compatible',
                0x08: 'Serial Port 16550 Compatible',
                0x09: 'Serial Port 16550A Compatible',
                0x0A: 'SCSI Port',
                0x0B: 'MIDI Port',
                0x0C: 'Joy Stick Port',
                0x0D: 'Keyboard Port',
                0x0E: 'Mouse Port',
                0x0F: 'SSA SCSI',
                0x10: 'USB',
                0x11: 'FireWire (IEEE P1394)',
                0x12: 'PCMCIA Type I2',
                0x13: 'PCMCIA Type II',
                0x14: 'PCMCIA Type III',
                0x15: 'Cardbus',
                0x16: 'Access Bus Port',
                0x17: 'SCSI II',
                0x18: 'SCSI Wide',
                0x19: 'PC-98',
                0x1A: 'PC-98-Hireso',
                0x1B: 'PC-H98',
                0x1C: 'Video Port',
                0x1D: 'Audio Port',
                0x1E: 'Modem Port',
                0x1F: 'Network Port',
                0x20: 'SATA',
                0x21: 'SAS',
                0xA0: '8251 Compatible',
                0xA1: '8251 FIFO Compatible',
                0xFF: 'Other',
            }
            self.add_field('port_type', u.unpack_one("B"), unpack.format_table("{}", port_types))
        except:
            # NOTE(review): attribute is decodeFailure here but
            # decode_failure in most other structures — likely an
            # upstream inconsistency; confirm before unifying.
            self.decodeFailure = True
            print "Error parsing PortConnectorInfo"
            import traceback
            traceback.print_exc()
        self.fini()
class SystemSlots(SmbiosBaseStructure):
    """SMBIOS type 9: expansion slot type, width, usage, and characteristics."""
    smbios_structure_type = 9
    def __init__(self, u, sm):
        super(SystemSlots, self).__init__(u, sm)
        u = self.u
        try:
            self.add_field('designation', u.unpack_one("B"), self.fmtstr)
            _slot_types = {
                0x01: 'Other',
                0x02: 'Unknown',
                0x03: 'ISA',
                0x04: 'MCA',
                0x05: 'EISA',
                0x06: 'PCI',
                0x07: 'PC Card (PCMCIA)',
                0x08: 'VL-VESA',
                0x09: 'Proprietary',
                0x0A: 'Processor Card Slot',
                0x0B: 'Proprietary Memory Card Slot',
                0x0C: 'I/O Riser Card Slot',
                0x0D: 'NuBus',
                0x0E: 'PCI 66MHz Capable',
                0x0F: 'AGP',
                0x10: 'AGP 2X',
                0x11: 'AGP 4X',
                0x12: 'PCI-X',
                0x13: 'AGP 8X',
                0xA0: 'PC-98/C20',
                0xA1: 'PC-98/C24',
                0xA2: 'PC-98/E',
                0xA3: 'PC-98/Local Bus',
                0xA4: 'PC-98/Card',
                0xA5: 'PCI Express',
                0xA6: 'PCI Express x1',
                0xA7: 'PCI Express x2',
                0xA8: 'PCI Express x4',
                0xA9: 'PCI Express x8',
                0xAA: 'PCI Express x16',
                0xAB: 'PCI Express Gen 2',
                0xAC: 'PCI Express Gen 2 x1',
                0xAD: 'PCI Express Gen 2 x2',
                0xAE: 'PCI Express Gen 2 x4',
                0xAF: 'PCI Express Gen 2 x8',
                0xB0: 'PCI Express Gen 2 x16',
                0xB1: 'PCI Express Gen 3',
                0xB2: 'PCI Express Gen 3 x1',
                0xB3: 'PCI Express Gen 3 x2',
                0xB4: 'PCI Express Gen 3 x4',
                0xB5: 'PCI Express Gen 3 x8',
                0xB6: 'PCI Express Gen 3 x16',
            }
            self.add_field('slot_type', u.unpack_one("B"), unpack.format_table("{}", _slot_types))
            _slot_data_bus_widths = {
                0x01: 'Other',
                0x02: 'Unknown',
                0x03: '8 bit',
                0x04: '16 bit',
                0x05: '32 bit',
                0x06: '64 bit',
                0x07: '128 bit',
                0x08: '1x or x1',
                0x09: '2x or x2',
                0x0A: '4x or x4',
                0x0B: '8x or x8',
                0x0C: '12x or x12',
                0x0D: '16x or x16',
                0x0E: '32x or x32',
            }
            self.add_field('slot_data_bus_width', u.unpack_one('B'), unpack.format_table("{}", _slot_data_bus_widths))
            _current_usages = {
                0x01: 'Other',
                0x02: 'Unknown',
                0x03: 'Available',
                0x04: 'In use',
            }
            self.add_field('current_usage', u.unpack_one('B'), unpack.format_table("{}", _current_usages))
            _slot_lengths = {
                0x01: 'Other',
                0x02: 'Unknown',
                0x03: 'Short Length',
                0x04: 'Long Length',
            }
            self.add_field('slot_length', u.unpack_one('B'), unpack.format_table("{}", _slot_lengths))
            self.add_field('slot_id', u.unpack_one('<H'))
            # Characteristics byte 1: decode each bit as a named boolean.
            self.add_field('characteristics1', u.unpack_one('B'))
            self.add_field('characteristics_unknown', bool(bitfields.getbits(self.characteristics1, 0)), "characteristics1[0]={}")
            self.add_field('provides_5_0_volts', bool(bitfields.getbits(self.characteristics1, 1)), "characteristics1[1]={}")
            self.add_field('provides_3_3_volts', bool(bitfields.getbits(self.characteristics1, 2)), "characteristics1[2]={}")
            self.add_field('shared_slot', bool(bitfields.getbits(self.characteristics1, 3)), "characteristics1[3]={}")
            self.add_field('supports_pc_card_16', bool(bitfields.getbits(self.characteristics1, 4)), "characteristics1[4]={}")
            self.add_field('supports_cardbus', bool(bitfields.getbits(self.characteristics1, 5)), "characteristics1[5]={}")
            self.add_field('supports_zoom_video', bool(bitfields.getbits(self.characteristics1, 6)), "characteristics1[6]={}")
            self.add_field('supports_modem_ring_resume', bool(bitfields.getbits(self.characteristics1, 7)), "characteristics1[7]={}")
            # Optional fields gated on the declared structure length.
            if self.length > 0x0C:
                self.add_field('characteristics2', u.unpack_one('B'))
                self.add_field('supports_PME', bool(bitfields.getbits(self.characteristics2, 0)), "characteristics2[0]={}")
                self.add_field('supports_hot_plug', bool(bitfields.getbits(self.characteristics2, 1)), "characteristics2[1]={}")
                self.add_field('supports_smbus', bool(bitfields.getbits(self.characteristics2, 2)), "characteristics2[2]={}")
            if self.length > 0x0D:
                self.add_field('segment_group_number', u.unpack_one('<H'))
                self.add_field('bus_number', u.unpack_one('B'))
                # PCI device/function are packed into one byte: dev in
                # bits 7:3, function in bits 2:0.
                self.add_field('device_function_number', u.unpack_one('B'))
                self.add_field('device_number', bitfields.getbits(self.device_function_number, 7, 3), "device_function_number[7:3]={}")
                self.add_field('function_number', bitfields.getbits(self.device_function_number, 2, 0), "device_function_number[2:0]={}")
        except:
            # NOTE(review): decodeFailure vs decode_failure naming
            # inconsistency — matches PortConnectorInfo, differs from the
            # other structures; confirm before unifying.
            self.decodeFailure = True
            print "Error parsing SystemSlots"
            import traceback
            traceback.print_exc()
        self.fini()
class Inactive(SmbiosBaseStructure):
    """SMBIOS type 126: marks a structure slot as inactive; no extra fields."""
    smbios_structure_type = 126
    def __init__(self, u, sm):
        super(Inactive, self).__init__(u, sm)
        self.fini()
class EndOfTable(SmbiosBaseStructure):
    """SMBIOS type 127: end-of-table marker; no extra fields."""
    smbios_structure_type = 127
    def __init__(self, u, sm):
        super(EndOfTable, self).__init__(u, sm)
        self.fini()
class SmbiosStructureUnknown(SmbiosBaseStructure):
    """Catch-all decoder: accepts any structure type (type None matches all)."""
    smbios_structure_type = None
    def __init__(self, u, sm):
        super(SmbiosStructureUnknown, self).__init__(u, sm)
        self.fini()
# Decoders tried in order by unpack.unpack_all for each structure; the
# first whose __new__ accepts the type byte wins.
_smbios_structures = [
    BIOSInformation,
    SystemInformation,
    BaseboardInformation,
    SystemEnclosure,
    ProcessorInformation,
    PortConnectorInfo,
    SystemSlots,
    EndOfTable,
    SmbiosStructureUnknown,  # Must always come last
]
def log_smbios_info():
    """Log a short SMBIOS summary (types 0 and 1 only) to the log."""
    with redirect.logonly():
        try:
            sm = SMBIOS()
            print
            if sm is None:
                print "No SMBIOS structures found"
                return
            output = {}
            known_types = (0, 1)
            for sm_struct in sm.structures:
                if sm_struct.type in known_types:
                    output.setdefault(sm_struct.type, []).append(sm_struct)
                # Stop scanning once every interesting type has been seen.
                if len(output) == len(known_types):
                    break
            print "SMBIOS information:"
            for key in sorted(known_types):
                for s in output.get(key, ["No structure of type {} found".format(key)]):
                    print ttypager._wrap("{}: {}".format(key, s))
        except:
            print "Error parsing SMBIOS information:"
            import traceback
            traceback.print_exc()
def dump_raw():
    """Page raw bytes plus decoded form of every SMBIOS structure."""
    try:
        sm = SMBIOS()
        if sm:
            s = "SMBIOS -- Raw bytes and structure decode.\n\n"
            s += str(sm.header) + '\n'
            s += bits.dumpmem(sm._header_memory) + '\n'
            s += "Raw bytes for the SMBIOS structures\n"
            s += bits.dumpmem(sm._structure_memory) + '\n'
            for sm_struct in sm.structures:
                s += str(sm_struct) + '\n'
                s += bits.dumpmem(sm_struct.raw_data)
                s += "Strings:\n"
                # SMBIOS string indices are 1-based; hence the shifted range.
                for n in range(1, len(getattr(sm_struct, "strings", [])) + 1):
                    s += str(sm_struct.fmtstr(n)) + '\n'
                s += bits.dumpmem(sm_struct.raw_strings) + '\n'
        else:
            s = "No SMBIOS structures found"
        ttypager.ttypager_wrap(s, indent=False)
    except:
        print "Error parsing SMBIOS information:"
        import traceback
        traceback.print_exc()
def dump():
    """Page the decoded SMBIOS tables (no raw bytes; see dump_raw)."""
    try:
        sm = SMBIOS()
        if sm:
            s = str(sm)
        else:
            s = "No SMBIOS structures found"
        ttypager.ttypager_wrap(s, indent=False)
    except:
        print "Error parsing SMBIOS information:"
        import traceback
        traceback.print_exc()
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the ``survey`` app.

    Replaces the free-text ``Household.name`` column with a ``head``
    foreign key to HouseholdHead plus per-age-bracket occupant counters
    (all defaulting to 0). Auto-generated by South; the ``models`` dict
    below is a frozen snapshot of the ORM at migration time.
    """

    def forwards(self, orm):
        # Deleting field 'Household.name'
        db.delete_column(u'survey_household', 'name')

        # Adding field 'Household.head'
        db.add_column(u'survey_household', 'head',
                      self.gf('django.db.models.fields.related.ForeignKey')(to=orm['survey.HouseholdHead'], null=True),
                      keep_default=False)

        # Adding field 'Household.number_of_males'
        db.add_column(u'survey_household', 'number_of_males',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)

        # Adding field 'Household.number_of_females'
        db.add_column(u'survey_household', 'number_of_females',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)

        # Adding field 'Household.children_5_12_years'
        db.add_column(u'survey_household', 'children_5_12_years',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)

        # Adding field 'Household.children_13_17_years'
        db.add_column(u'survey_household', 'children_13_17_years',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)

        # Adding field 'Household.children_0_5_months'
        db.add_column(u'survey_household', 'children_0_5_months',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)

        # Adding field 'Household.children_6_11_months'
        db.add_column(u'survey_household', 'children_6_11_months',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)

        # Adding field 'Household.children_12_23_months'
        db.add_column(u'survey_household', 'children_12_23_months',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)

        # Adding field 'Household.children_24_59_months'
        db.add_column(u'survey_household', 'children_24_59_months',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)

        # Adding field 'Household.women_15_19_years'
        db.add_column(u'survey_household', 'women_15_19_years',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)

        # Adding field 'Household.children_20_49_years'
        db.add_column(u'survey_household', 'children_20_49_years',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)

    def backwards(self, orm):
        # Adding field 'Household.name'
        # 'dummy' default satisfies NOT NULL for pre-existing rows on rollback.
        db.add_column(u'survey_household', 'name',
                      self.gf('django.db.models.fields.CharField')(default='dummy', max_length=100),
                      keep_default=False)

        # Deleting field 'Household.head'
        # Note the column name is 'head_id' (FK column), not 'head'.
        db.delete_column(u'survey_household', 'head_id')

        # Deleting field 'Household.number_of_males'
        db.delete_column(u'survey_household', 'number_of_males')

        # Deleting field 'Household.number_of_females'
        db.delete_column(u'survey_household', 'number_of_females')

        # Deleting field 'Household.children_5_12_years'
        db.delete_column(u'survey_household', 'children_5_12_years')

        # Deleting field 'Household.children_13_17_years'
        db.delete_column(u'survey_household', 'children_13_17_years')

        # Deleting field 'Household.children_0_5_months'
        db.delete_column(u'survey_household', 'children_0_5_months')

        # Deleting field 'Household.children_6_11_months'
        db.delete_column(u'survey_household', 'children_6_11_months')

        # Deleting field 'Household.children_12_23_months'
        db.delete_column(u'survey_household', 'children_12_23_months')

        # Deleting field 'Household.children_24_59_months'
        db.delete_column(u'survey_household', 'children_24_59_months')

        # Deleting field 'Household.women_15_19_years'
        db.delete_column(u'survey_household', 'women_15_19_years')

        # Deleting field 'Household.children_20_49_years'
        db.delete_column(u'survey_household', 'children_20_49_years')

    # Frozen ORM snapshot (auto-generated; do not hand-edit).
    models = {
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'locations.location': {
            'Meta': {'object_name': 'Location'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'parent_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
            'point': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Point']", 'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['locations.Location']"}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations'", 'null': 'True', 'to': u"orm['locations.LocationType']"})
        },
        u'locations.locationtype': {
            'Meta': {'object_name': 'LocationType'},
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'primary_key': 'True'})
        },
        u'locations.point': {
            'Meta': {'object_name': 'Point'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'}),
            'longitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'})
        },
        'survey.answerrule': {
            'Meta': {'object_name': 'AnswerRule'},
            'action': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'condition': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'next_question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_question_rules'", 'null': 'True', 'to': "orm['survey.Question']"}),
            'question': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'rule'", 'unique': 'True', 'null': 'True', 'to': "orm['survey.Question']"}),
            'validate_with_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.QuestionOption']", 'null': 'True'}),
            'validate_with_question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
            'validate_with_value': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'})
        },
        'survey.batch': {
            'Meta': {'object_name': 'Batch'},
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'survey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batches'", 'null': 'True', 'to': "orm['survey.Survey']"})
        },
        'survey.household': {
            'Meta': {'object_name': 'Household'},
            'children_0_5_months': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'children_12_23_months': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'children_13_17_years': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'children_20_49_years': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'children_24_59_months': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'children_5_12_years': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'children_6_11_months': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'head': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.HouseholdHead']", 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'households'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'number_of_females': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'number_of_males': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'women_15_19_years': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        'survey.householdhead': {
            'Meta': {'object_name': 'HouseholdHead'},
            'age': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level_of_education': ('django.db.models.fields.CharField', [], {'default': "'Primary'", 'max_length': '100', 'null': 'True'}),
            'male': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "'Household'", 'max_length': '100'}),
            'occupation': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'resident_since': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        'survey.indicator': {
            'Meta': {'object_name': 'Indicator'},
            'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'indicators'", 'null': 'True', 'to': "orm['survey.Batch']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'})
        },
        'survey.investigator': {
            'Meta': {'object_name': 'Investigator'},
            'age': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'default': "'English'", 'max_length': '100', 'null': 'True'}),
            'level_of_education': ('django.db.models.fields.CharField', [], {'default': "'Primary'", 'max_length': '100', 'null': 'True'}),
            'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
            'male': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'mobile_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'survey.locationautocomplete': {
            'Meta': {'object_name': 'LocationAutoComplete'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
            'text': ('django.db.models.fields.CharField', [], {'max_length': '500'})
        },
        'survey.multichoiceanswer': {
            'Meta': {'object_name': 'MultiChoiceAnswer'},
            'answer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.QuestionOption']", 'null': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'household': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Household']", 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Investigator']", 'null': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'})
        },
        'survey.numericalanswer': {
            'Meta': {'object_name': 'NumericalAnswer'},
            'answer': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '5', 'null': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'household': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Household']", 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Investigator']", 'null': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'})
        },
        'survey.question': {
            'Meta': {'object_name': 'Question'},
            'answer_type': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'null': 'True', 'to': "orm['survey.Indicator']"}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': "orm['survey.Question']"}),
            'subquestion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'text': ('django.db.models.fields.CharField', [], {'max_length': '60'})
        },
        'survey.questionoption': {
            'Meta': {'object_name': 'QuestionOption'},
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
            'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'null': 'True', 'to': "orm['survey.Question']"}),
            'text': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        },
        'survey.survey': {
            'Meta': {'object_name': 'Survey'},
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'survey.textanswer': {
            'Meta': {'object_name': 'TextAnswer'},
            'answer': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'household': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Household']", 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Investigator']", 'null': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'})
        }
    }

    complete_apps = ['survey']
| |
import tty
import sys
import curses
import datetime
import locale
from decimal import Decimal
import getpass
import logging
import electrum
from electrum.util import format_satoshis
from electrum.bitcoin import is_address, COIN, TYPE_ADDRESS
from electrum.transaction import TxOutput
from electrum.wallet import Wallet
from electrum.storage import WalletStorage
from electrum.network import NetworkParameters, TxBroadcastError, BestEffortRequestFailed
from electrum.interface import deserialize_server
from electrum.logging import console_stderr_handler
_ = lambda x:x # i18n
class ElectrumGui:
    """Text-mode (curses) user interface for an Electrum wallet.

    A single-window UI with six tabs (History, Send, Receive, Addresses,
    Contacts, Banner). Navigation state lives in ``self.tab`` (-1 means
    quit), ``self.pos`` (cursor within the current tab) and
    ``self.popup_pos`` (cursor within a modal dialog).
    """

    def __init__(self, config, daemon, plugins):
        """Open (and decrypt if needed) the wallet, then initialize curses.

        Exits the process if the wallet file does not exist.
        """
        self.config = config
        self.network = daemon.network
        storage = WalletStorage(config.get_wallet_path())
        if not storage.file_exists():
            print("Wallet not found. try 'electrum create'")
            exit()
        if storage.is_encrypted():
            password = getpass.getpass('Password:', stream=None)
            storage.decrypt(password)
        self.wallet = Wallet(storage)
        self.wallet.start_network(self.network)
        self.contacts = self.wallet.contacts

        # Honor the user's locale so addstr() encoding matches the terminal.
        locale.setlocale(locale.LC_ALL, '')
        self.encoding = locale.getpreferredencoding()

        self.stdscr = curses.initscr()
        curses.noecho()
        curses.cbreak()
        curses.start_color()
        curses.use_default_colors()
        # Color pairs: 1 = edit fields, 2 = buttons, 3 = QR code rendering.
        curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLUE)
        curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_CYAN)
        curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_WHITE)
        self.stdscr.keypad(1)
        self.stdscr.border(0)
        self.maxy, self.maxx = self.stdscr.getmaxyx()
        self.set_cursor(0)
        # Shared popup window, recreated by run_dialog() as needed.
        self.w = curses.newwin(10, 50, 5, 5)

        # Silence logging on stderr so it does not corrupt the curses screen.
        console_stderr_handler.setLevel(logging.CRITICAL)

        self.tab = 0
        self.pos = 0
        self.popup_pos = 0
        self.str_recipient = ""
        self.str_description = ""
        self.str_amount = ""
        self.str_fee = ""
        self.history = None
        if self.network:
            self.network.register_callback(self.update, ['wallet_updated', 'network_updated'])
        self.tab_names = [_("History"), _("Send"), _("Receive"), _("Addresses"), _("Contacts"), _("Banner")]
        self.num_tabs = len(self.tab_names)

    def set_cursor(self, x):
        """Set terminal cursor visibility; ignore terminals that can't."""
        try:
            curses.curs_set(x)
        except Exception:
            pass

    def restore_or_create(self):
        # Wallet creation is not supported in the text UI.
        pass

    def verify_seed(self):
        # Seed verification is not supported in the text UI.
        pass

    def get_string(self, y, x):
        """Read a line of user input at (y, x) with echo enabled.

        Returns the raw bytes from curses getstr().
        """
        self.set_cursor(1)
        curses.echo()
        self.stdscr.addstr( y, x, " "*20, curses.A_REVERSE)
        s = self.stdscr.getstr(y,x)
        curses.noecho()
        self.set_cursor(0)
        return s

    def update(self, event, *args):
        """Network/wallet callback: refresh cached history and redraw."""
        self.update_history()
        if self.tab == 0:
            self.print_history()
        self.refresh()

    def print_history(self):
        """Render the cached history lines (newest first) with a header row."""
        width = [20, 40, 14, 14]
        # NOTE(review): '/' yields a float here; '%d' truncates it when
        # building the format string -- presumably intended as integer
        # division ('//'), confirm before changing.
        delta = (self.maxx - sum(width) - 4)/3
        format_str = "%"+"%d"%width[0]+"s"+"%"+"%d"%(width[1]+delta)+"s"+"%"+"%d"%(width[2]+delta)+"s"+"%"+"%d"%(width[3]+delta)+"s"
        if self.history is None:
            self.update_history()
        self.print_list(self.history[::-1], format_str%( _("Date"), _("Description"), _("Amount"), _("Balance")))

    def update_history(self):
        """Rebuild self.history: one pre-formatted line per wallet transaction."""
        width = [20, 40, 14, 14]
        delta = (self.maxx - sum(width) - 4)/3
        format_str = "%"+"%d"%width[0]+"s"+"%"+"%d"%(width[1]+delta)+"s"+"%"+"%d"%(width[2]+delta)+"s"+"%"+"%d"%(width[3]+delta)+"s"
        b = 0
        self.history = []
        for tx_hash, tx_mined_status, value, balance in self.wallet.get_history():
            if tx_mined_status.conf:
                timestamp = tx_mined_status.timestamp
                try:
                    # Drop seconds' fraction; "------" if timestamp is invalid.
                    time_str = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
                except Exception:
                    time_str = "------"
            else:
                time_str = 'unconfirmed'
            label = self.wallet.get_label(tx_hash)
            if len(label) > 40:
                label = label[0:37] + '...'
            self.history.append( format_str%( time_str, label, format_satoshis(value, whitespaces=True), format_satoshis(balance, whitespaces=True) ) )

    def print_balance(self):
        """Draw the status line (balance / sync state) and the tab bar."""
        if not self.network:
            msg = _("Offline")
        elif self.network.is_connected():
            if not self.wallet.up_to_date:
                msg = _("Synchronizing...")
            else:
                # c/u/x = confirmed, unconfirmed, unmatured (in satoshis).
                c, u, x = self.wallet.get_balance()
                msg = _("Balance")+": %f  "%(Decimal(c) / COIN)
                if u:
                    msg += "  [%f unconfirmed]"%(Decimal(u) / COIN)
                if x:
                    msg += "  [%f unmatured]"%(Decimal(x) / COIN)
        else:
            msg = _("Not connected")
        self.stdscr.addstr( self.maxy -1, 3, msg)
        for i in range(self.num_tabs):
            self.stdscr.addstr( 0, 2 + 2*i + len(''.join(self.tab_names[0:i])), ' '+self.tab_names[i]+' ', curses.A_BOLD if self.tab == i else 0)
        self.stdscr.addstr(self.maxy -1, self.maxx-30, ' '.join([_("Settings"), _("Network"), _("Quit")]))

    def print_receive(self):
        """Show the current receiving address together with its QR code."""
        addr = self.wallet.get_receiving_address()
        self.stdscr.addstr(2, 1, "Address: "+addr)
        self.print_qr(addr)

    def print_contacts(self):
        """List contacts as "key  value" rows."""
        messages = map(lambda x: "%20s   %45s "%(x[0], x[1][1]), self.contacts.items())
        self.print_list(messages, "%19s  %15s "%("Key", "Value"))

    def print_addresses(self):
        """List all wallet addresses with their labels."""
        fmt = "%-35s  %-30s"
        messages = map(lambda addr: fmt % (addr, self.wallet.labels.get(addr,"")), self.wallet.get_addresses())
        self.print_list(messages, fmt % ("Address", "Label"))

    def print_edit_line(self, y, label, text, index, size):
        """Draw one labelled edit field, highlighted when focused (pos == index)."""
        text += " "*(size - len(text) )
        self.stdscr.addstr( y, 2, label)
        self.stdscr.addstr( y, 15, text, curses.A_REVERSE if self.pos%6==index else curses.color_pair(1))

    def print_send_tab(self):
        """Draw the Send form: four edit fields plus [Send]/[Clear] buttons."""
        self.stdscr.clear()
        self.print_edit_line(3, _("Pay to"), self.str_recipient, 0, 40)
        self.print_edit_line(5, _("Description"), self.str_description, 1, 40)
        self.print_edit_line(7, _("Amount"), self.str_amount, 2, 15)
        self.print_edit_line(9, _("Fee"), self.str_fee, 3, 15)
        self.stdscr.addstr( 12, 15, _("[Send]"), curses.A_REVERSE if self.pos%6==4 else curses.color_pair(2))
        self.stdscr.addstr( 12, 25, _("[Clear]"), curses.A_REVERSE if self.pos%6==5 else curses.color_pair(2))
        self.maxpos = 6

    def print_banner(self):
        """Show the server banner, if the network supplied one."""
        if self.network and self.network.banner:
            banner = self.network.banner
            banner = banner.replace('\r', '')
            self.print_list(banner.split('\n'))

    def print_qr(self, data):
        """Render *data* as an ASCII QR code starting at row 5, col 5.

        Falls back to an error message when the screen is too small.
        """
        import qrcode
        try:
            from StringIO import StringIO
        except ImportError:
            from io import StringIO

        s = StringIO()
        self.qr = qrcode.QRCode()
        self.qr.add_data(data)
        self.qr.print_ascii(out=s, invert=False)
        msg = s.getvalue()
        lines = msg.split('\n')
        try:
            for i, l in enumerate(lines):
                l = l.encode("utf-8")
                self.stdscr.addstr(i+5, 5, l, curses.color_pair(3))
        except curses.error:
            m = 'error. screen too small?'
            m = m.encode(self.encoding)
            self.stdscr.addstr(5, 1, m, 0)

    def print_list(self, lst, firstline = None):
        """Draw *lst* as selectable rows; the row at pos%maxpos is highlighted."""
        lst = list(lst)
        self.maxpos = len(lst)
        if not self.maxpos: return
        if firstline:
            firstline += " "*(self.maxx -2 - len(firstline))
            self.stdscr.addstr( 1, 1, firstline )
        for i in range(self.maxy-4):
            msg = lst[i] if i < len(lst) else ""
            msg += " "*(self.maxx - 2 - len(msg))
            m = msg[0:self.maxx - 2]
            m = m.encode(self.encoding)
            self.stdscr.addstr( i+2, 1, m, curses.A_REVERSE if i == (self.pos % self.maxpos) else 0)

    def refresh(self):
        """Redraw border and status line; no-op once the GUI is quitting."""
        if self.tab == -1: return
        self.stdscr.border(0)
        self.print_balance()
        self.stdscr.refresh()

    def main_command(self):
        """Read one key; handle global navigation, else return it to the tab.

        Returns the key code for tab-specific handling, or None if consumed.
        """
        c = self.stdscr.getch()
        # NOTE(review): stray debug print to stdout inside a curses session --
        # likely leftover; confirm before removing.
        print(c)
        cc = curses.unctrl(c).decode()
        if   c == curses.KEY_RIGHT: self.tab = (self.tab + 1)%self.num_tabs
        elif c == curses.KEY_LEFT: self.tab = (self.tab - 1)%self.num_tabs
        elif c == curses.KEY_DOWN: self.pos +=1
        elif c == curses.KEY_UP: self.pos -= 1
        elif c == 9: self.pos +=1 # tab
        elif cc in ['^W', '^C', '^X', '^Q']: self.tab = -1
        elif cc in ['^N']: self.network_dialog()
        elif cc == '^S': self.settings_dialog()
        else: return c
        if self.pos<0: self.pos=0
        if self.pos>=self.maxpos: self.pos=self.maxpos - 1

    def run_tab(self, i, print_func, exec_func):
        """Event loop for tab *i*: draw, read a key, dispatch, until tab changes."""
        while self.tab == i:
            self.stdscr.clear()
            print_func()
            self.refresh()
            c = self.main_command()
            if c: exec_func(c)

    def run_history_tab(self, c):
        # Enter on a history row opens a (placeholder) popup.
        if c == 10:
            out = self.run_popup('',["blah","foo"])

    def edit_str(self, target, c, is_num=False):
        """Apply keypress *c* to the string *target*; returns the new string.

        Handles backspace (8/127/KEY_BACKSPACE); with is_num=True only
        digits and '.' are accepted.
        """
        # detect backspace
        cc = curses.unctrl(c).decode()
        if c in [8, 127, 263] and target:
            target = target[:-1]
        elif not is_num or cc in '0123456789.':
            target += cc
        return target

    def run_send_tab(self, c):
        """Route a keypress to the focused Send-tab field or button."""
        if self.pos%6 == 0:
            self.str_recipient = self.edit_str(self.str_recipient, c)
        if self.pos%6 == 1:
            self.str_description = self.edit_str(self.str_description, c)
        if self.pos%6 == 2:
            self.str_amount = self.edit_str(self.str_amount, c, True)
        elif self.pos%6 == 3:
            self.str_fee = self.edit_str(self.str_fee, c, True)
        elif self.pos%6==4:
            if c == 10: self.do_send()
        elif self.pos%6==5:
            if c == 10: self.do_clear()

    def run_receive_tab(self, c):
        # Enter opens the address actions popup (actions not yet wired up).
        if c == 10:
            out = self.run_popup('Address', ["Edit label", "Freeze", "Prioritize"])

    def run_contacts_tab(self, c):
        """Enter on a contact opens an action popup (Pay to / Edit label)."""
        if c == 10 and self.contacts:
            out = self.run_popup('Address', ["Copy", "Pay to", "Edit label", "Delete"]).get('button')
            key = list(self.contacts.keys())[self.pos%len(self.contacts.keys())]
            if out == "Pay to":
                self.tab = 1
                self.str_recipient = key
                self.pos = 2
            elif out == "Edit label":
                s = self.get_string(6 + self.pos, 18)
                if s:
                    self.wallet.labels[key] = s

    def run_banner_tab(self, c):
        self.show_message(repr(c))
        pass

    def main(self):
        """Top-level loop: run each tab until the user quits (tab == -1).

        Always restores the terminal state, even on error.
        """
        tty.setraw(sys.stdin)
        try:
            while self.tab != -1:
                self.run_tab(0, self.print_history, self.run_history_tab)
                self.run_tab(1, self.print_send_tab, self.run_send_tab)
                self.run_tab(2, self.print_receive, self.run_receive_tab)
                self.run_tab(3, self.print_addresses, self.run_banner_tab)
                self.run_tab(4, self.print_contacts, self.run_contacts_tab)
                self.run_tab(5, self.print_banner, self.run_banner_tab)
        except curses.error as e:
            raise Exception("Error with curses. Is your screen too small?") from e
        finally:
            tty.setcbreak(sys.stdin)
            curses.nocbreak()
            self.stdscr.keypad(0)
            curses.echo()
            curses.endwin()

    def do_clear(self):
        """Reset all Send-tab fields."""
        self.str_amount = ''
        self.str_recipient = ''
        self.str_fee = ''
        self.str_description = ''

    def do_send(self):
        """Validate the Send form, build, sign and broadcast the transaction."""
        if not is_address(self.str_recipient):
            self.show_message(_('Invalid Bitcoin address'))
            return
        try:
            amount = int(Decimal(self.str_amount) * COIN)
        except Exception:
            self.show_message(_('Invalid Amount'))
            return
        try:
            fee = int(Decimal(self.str_fee) * COIN)
        except Exception:
            self.show_message(_('Invalid Fee'))
            return

        if self.wallet.has_password():
            password = self.password_dialog()
            if not password:
                return
        else:
            password = None
        try:
            tx = self.wallet.mktx([TxOutput(TYPE_ADDRESS, self.str_recipient, amount)],
                                  password, self.config, fee)
        except Exception as e:
            self.show_message(repr(e))
            return

        if self.str_description:
            self.wallet.labels[tx.txid()] = self.str_description

        self.show_message(_("Please wait..."), getchar=False)
        try:
            # Broadcasting must run on the network thread's event loop.
            self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
        except TxBroadcastError as e:
            msg = e.get_message_for_gui()
            self.show_message(msg)
        except BestEffortRequestFailed as e:
            msg = repr(e)
            self.show_message(msg)
        else:
            self.show_message(_('Payment sent.'))
            self.do_clear()
            #self.update_contacts_tab()

    def show_message(self, message, getchar = True):
        """Display *message* in the popup window; optionally wait for a key."""
        w = self.w
        w.clear()
        w.border(0)
        for i, line in enumerate(message.split('\n')):
            w.addstr(2+i,2,line)
        w.refresh()
        if getchar: c = self.stdscr.getch()

    def run_popup(self, title, items):
        """Show a button-only dialog; returns run_dialog()'s result dict."""
        return self.run_dialog(title, list(map(lambda x: {'type':'button','label':x}, items)), interval=1, y_pos = self.pos+3)

    def network_dialog(self):
        """Edit server/proxy settings and apply them to the network."""
        if not self.network:
            return
        net_params = self.network.get_parameters()
        host, port, protocol = net_params.host, net_params.port, net_params.protocol
        proxy_config, auto_connect = net_params.proxy, net_params.auto_connect
        srv = 'auto-connect' if auto_connect else self.network.default_server
        out = self.run_dialog('Network', [
            {'label':'server', 'type':'str', 'value':srv},
            {'label':'proxy', 'type':'str', 'value':self.config.get('proxy', '')},
            ], buttons = 1)
        if out:
            if out.get('server'):
                server = out.get('server')
                auto_connect = server == 'auto-connect'
                if not auto_connect:
                    try:
                        host, port, protocol = deserialize_server(server)
                    except Exception:
                        self.show_message("Error:" + server + "\nIn doubt, type \"auto-connect\"")
                        return False
            if out.get('server') or out.get('proxy'):
                proxy = electrum.network.deserialize_proxy(out.get('proxy')) if out.get('proxy') else proxy_config
                net_params = NetworkParameters(host, port, protocol, proxy, auto_connect)
                self.network.run_from_another_thread(self.network.set_parameters(net_params))

    def settings_dialog(self):
        """Edit the default fee (entered in BTC, stored in sat/kB)."""
        fee = str(Decimal(self.config.fee_per_kb()) / COIN)
        out = self.run_dialog('Settings', [
            {'label':'Default fee', 'type':'satoshis', 'value': fee }
            ], buttons = 1)
        if out:
            if out.get('Default fee'):
                fee = int(Decimal(out['Default fee']) * COIN)
                self.config.set_key('fee_per_kb', fee, True)

    def password_dialog(self):
        """Prompt for the wallet password; returns it (or None/'' if canceled)."""
        out = self.run_dialog('Password', [
            {'label':'Password', 'type':'password', 'value':''}
            ], buttons = 1)
        return out.get('Password')

    def run_dialog(self, title, items, interval=2, buttons=None, y_pos=3):
        """Generic modal dialog loop.

        *items* is a list of dicts with 'type' ('str', 'password',
        'satoshis', 'list', 'button'), 'label' and optionally 'value'.
        Returns a dict of edited values; for button items the chosen
        label is stored under 'button'. 'q'/Esc cancels, arrows move,
        Enter activates ok/cancel (when *buttons*) or a button item.
        """
        self.popup_pos = 0
        self.w = curses.newwin( 5 + len(list(items))*interval + (2 if buttons else 0), 50, y_pos, 5)
        w = self.w
        out = {}
        while True:
            w.clear()
            w.border(0)
            w.addstr( 0, 2, title)

            num = len(list(items))

            numpos = num
            if buttons: numpos += 2

            for i in range(num):
                item = items[i]
                label = item.get('label')
                if item.get('type') == 'list':
                    value = item.get('value','')
                elif item.get('type') == 'satoshis':
                    value = item.get('value','')
                elif item.get('type') == 'str':
                    value = item.get('value','')
                elif item.get('type') == 'password':
                    value = '*'*len(item.get('value',''))
                else:
                    value = ''
                if value is None:
                    value = ''
                if len(value)<20:
                    value += ' '*(20-len(value))

                if 'value' in item:
                    w.addstr( 2+interval*i, 2, label)
                    w.addstr( 2+interval*i, 15, value, curses.A_REVERSE if self.popup_pos%numpos==i else curses.color_pair(1) )
                else:
                    w.addstr( 2+interval*i, 2, label, curses.A_REVERSE if self.popup_pos%numpos==i else 0)

            if buttons:
                w.addstr( 5+interval*i, 10, "[  ok  ]", curses.A_REVERSE if self.popup_pos%numpos==(numpos-2) else curses.color_pair(2))
                w.addstr( 5+interval*i, 25, "[cancel]", curses.A_REVERSE if self.popup_pos%numpos==(numpos-1) else curses.color_pair(2))

            w.refresh()

            c = self.stdscr.getch()
            if c in [ord('q'), 27]: break
            elif c in [curses.KEY_LEFT, curses.KEY_UP]: self.popup_pos -= 1
            elif c in [curses.KEY_RIGHT, curses.KEY_DOWN]: self.popup_pos +=1
            else:
                i = self.popup_pos%numpos
                if buttons and c==10:
                    if i == numpos-2:
                        return out
                    elif i == numpos -1:
                        return {}

                item = items[i]
                _type = item.get('type')

                if _type == 'str':
                    item['value'] = self.edit_str(item['value'], c)
                    out[item.get('label')] = item.get('value')

                elif _type == 'password':
                    item['value'] = self.edit_str(item['value'], c)
                    out[item.get('label')] = item ['value']

                elif _type == 'satoshis':
                    item['value'] = self.edit_str(item['value'], c, True)
                    out[item.get('label')] = item.get('value')

                elif _type == 'list':
                    # Cycle through the item's choices.
                    choices = item.get('choices')
                    try:
                        j = choices.index(item.get('value'))
                    except Exception:
                        j = 0
                    new_choice = choices[(j + 1)% len(choices)]
                    item['value'] = new_choice
                    out[item.get('label')] = item.get('value')

                elif _type == 'button':
                    out['button'] = item.get('label')
                    break

        return out
| |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import types
from zoo.orca.automl.auto_estimator import AutoEstimator
from zoo.chronos.data import TSDataset
import zoo.orca.automl.hp as hp
from zoo.chronos.autots.model import AutoModelFactory
from zoo.chronos.autots.tspipeline import TSPipeline
class AutoTSEstimator:
    """
    Automated TimeSeries Estimator for time series forecasting task, which supports
    TSDataset and customized data creator as data input on built-in model (only
    "lstm", "tcn", "seq2seq" for now) and 3rd party model.
    Only backend="torch" is supported for now. Customized data creator has not been
    fully supported by TSPipeline.
    >>> # Here is a use case example:
    >>> # prepare train/valid/test tsdataset
    >>> autoest = AutoTSEstimator(model="lstm",
    >>>                           search_space=search_space,
    >>>                           past_seq_len=6,
    >>>                           future_seq_len=1)
    >>> tsppl = autoest.fit(data=tsdata_train,
    >>>                     validation_data=tsdata_valid)
    >>> tsppl.predict(tsdata_test)
    >>> tsppl.save("my_tsppl")
    """
    def __init__(self,
                 model="lstm",
                 search_space=None,
                 metric="mse",
                 loss=None,
                 optimizer="Adam",
                 past_seq_len=2,
                 future_seq_len=1,
                 input_feature_num=None,
                 output_target_num=None,
                 selected_features="auto",
                 backend="torch",
                 logs_dir="/tmp/autots_estimator",
                 cpus_per_trial=1,
                 name="autots_estimator",
                 remote_dir=None,
                 ):
        """
        AutoTSEstimator trains a model for time series forecasting.
        Users can choose one of the built-in models, or pass in a customized pytorch or keras model
        for tuning using AutoML.
        :param model: a string or a model creation function.
            A string indicates a built-in model, currently "lstm", "tcn", "seq2seq" are
            supported.
            A model creation function indicates a 3rd party model, the function should take a
            config param and return a torch.nn.Module (backend="torch") / tf model
            (backend="keras").
            If you use chronos.data.TSDataset as data input, the 3rd party
            should have 3 dim input (num_sample, past_seq_len, input_feature_num) and 3 dim
            output (num_sample, future_seq_len, output_feature_num) and use the same key
            in the model creation function. If you use a customized data creator, the output of
            data creator should fit the input of model creation function.
        :param search_space: str or dict. hyper parameter configurations. It defaults to
            None, which is treated as an empty dict. For str, you can choose
            from "minimal", "normal", or "large", each represents a default search_space for
            our built-in model with different computing requirement. For dict, Read the API docs
            for each auto model. Some common hyper parameter can be explicitly set in named
            parameter. search_space should contain those parameters other than the keyword
            arguments in this constructor in its key. If a 3rd party model is used, then you
            must set search_space to a dict.
        :param metric: String. The evaluation metric name to optimize. e.g. "mse"
        :param loss: String or pytorch/tf.keras loss instance or pytorch loss creator function. The
            default loss function for pytorch backend is nn.MSELoss().
        :param optimizer: String or pyTorch optimizer creator function or
            tf.keras optimizer instance.
        :param past_seq_len: Int or or hp sampling function. The number of historical steps (i.e.
            lookback) used for forecasting. For hp sampling, see zoo.orca.automl.hp for more
            details. The values defaults to 2.
        :param future_seq_len: Int. The number of future steps to forecast. The value defaults
            to 1.
        :param input_feature_num: Int. The number of features in the input. The value is ignored if
            you use chronos.data.TSDataset as input data type.
        :param output_target_num: Int. The number of targets in the output. The value is ignored if
            you use chronos.data.TSDataset as input data type.
        :param selected_features: String. "all" and "auto" are supported for now. For "all",
            all features that are generated are used for each trial. For "auto", a subset
            is sampled randomly from all features for each trial. The parameter is ignored
            if not using chronos.data.TSDataset as input data type. The value defaults
            to "auto".
        :param backend: The backend of the auto model. We only support backend as "torch" for now.
        :param logs_dir: Local directory to save logs and results.
            It defaults to "/tmp/autots_estimator"
        :param cpus_per_trial: Int. Number of cpus for each trial. It defaults to 1.
        :param name: name of the autots estimator. It defaults to "autots_estimator".
        :param remote_dir: String. Remote directory to sync training results and checkpoints. It
            defaults to None and doesn't take effects while running in local. While running in
            cluster, it defaults to "hdfs:///tmp/{name}".
        :raise ValueError: if backend is not "torch" or model is neither a supported
            built-in model name nor a model creation function.
        """
        # check backend and set default loss
        if backend != "torch":
            raise ValueError(f"We only support backend as torch. Got {backend}")
        import torch
        if loss is None:
            loss = torch.nn.MSELoss()

        # NOTE: the previous signature used search_space=dict(), a shared mutable
        # default that is mutated via update() below, so two estimators built with
        # the default would have polluted each other's search space.
        if search_space is None:
            search_space = {}
        if isinstance(search_space, str):
            search_space = AutoModelFactory.get_default_search_space(model, search_space)

        if isinstance(model, types.FunctionType):
            # pytorch 3rd party model (backend has already been validated as "torch")
            self.model = AutoEstimator.from_torch(model_creator=model,
                                                  optimizer=optimizer,
                                                  loss=loss,
                                                  logs_dir=logs_dir,
                                                  resources_per_trial={"cpu": cpus_per_trial},
                                                  name=name)
            self.metric = metric
            # 3rd party models read "output_feature_num" (not "output_target_num")
            search_space.update({"past_seq_len": past_seq_len,
                                 "future_seq_len": future_seq_len,
                                 "input_feature_num": input_feature_num,
                                 "output_feature_num": output_target_num})
        elif isinstance(model, str):
            # built-in model
            # update auto model common search space
            search_space.update({"past_seq_len": past_seq_len,
                                 "future_seq_len": future_seq_len,
                                 "input_feature_num": input_feature_num,
                                 "output_target_num": output_target_num,
                                 "loss": loss,
                                 "metric": metric,
                                 "optimizer": optimizer,
                                 "backend": backend,
                                 "logs_dir": logs_dir,
                                 "cpus_per_trial": cpus_per_trial,
                                 "name": name})
            # create auto model from name
            self.model = AutoModelFactory.create_auto_model(name=model,
                                                            search_space=search_space)
        else:
            # previously an unsupported model type silently left self.model unset
            # and failed later with a confusing AttributeError in fit()
            raise ValueError("model should be a string (built-in model name) or "
                             f"a model creation function, but found {type(model)}")
        self.search_space = search_space

        # save selected features setting for data creator generation
        self.selected_features = selected_features
        self._scaler = None
        self._scaler_index = None

    def fit(self,
            data,
            epochs=1,
            batch_size=32,
            validation_data=None,
            metric_threshold=None,
            n_sampling=1,
            search_alg=None,
            search_alg_params=None,
            scheduler=None,
            scheduler_params=None
            ):
        """
        fit using AutoEstimator
        :param data: train data.
            For backend of "torch", data can be a TSDataset or a function that takes a
            config dictionary as parameter and returns a PyTorch DataLoader.
            For backend of "keras", data can be a TSDataset.
        :param epochs: Max number of epochs to train in each trial. Defaults to 1.
            If you have also set metric_threshold, a trial will stop if either it has been
            optimized to the metric_threshold or it has been trained for {epochs} epochs.
        :param batch_size: Int or hp sampling function from an integer space. Training batch size.
            It defaults to 32.
        :param validation_data: Validation data. Validation data type should be the same as data.
        :param metric_threshold: a trial will be terminated when metric threshold is met.
        :param n_sampling: Number of times to sample from the search_space. Defaults to 1.
            If hp.grid_search is in search_space, the grid will be repeated n_sampling of times.
            If this is -1, (virtually) infinite samples are generated
            until a stopping condition is met.
        :param search_alg: str, all supported searcher provided by ray tune
            (i.e."variant_generator", "random", "ax", "dragonfly", "skopt",
            "hyperopt", "bayesopt", "bohb", "nevergrad", "optuna", "zoopt" and
            "sigopt")
        :param search_alg_params: extra parameters for searcher algorithm besides search_space,
            metric and searcher mode
        :param scheduler: str, all supported scheduler provided by ray tune
        :param scheduler_params: parameters for scheduler
        :return: a TSPipeline with the best model.
        """
        is_third_party_model = isinstance(self.model, AutoEstimator)

        # generate data creator from TSDataset (pytorch base require validation data)
        if isinstance(data, TSDataset) and isinstance(validation_data, TSDataset):
            train_d, val_d = self._prepare_data_creator(
                search_space=self.search_space if is_third_party_model else self.model.search_space,
                train_data=data,
                val_data=validation_data,
            )
            # remember the dataset's scaling so TSPipeline can unscale predictions
            self._scaler = data.scaler
            self._scaler_index = data.scaler_index
        else:
            # assumes both data and validation_data are data creator
            # functions / DataLoaders in this branch -- TODO confirm with callers
            train_d, val_d = data, validation_data

        if is_third_party_model:
            # 3rd party models take batch_size through the search space
            self.search_space.update({"batch_size": batch_size})
            self.model.fit(
                data=train_d,
                epochs=epochs,
                validation_data=val_d,
                metric=self.metric,
                metric_threshold=metric_threshold,
                n_sampling=n_sampling,
                search_space=self.search_space,
                search_alg=search_alg,
                search_alg_params=search_alg_params,
                scheduler=scheduler,
                scheduler_params=scheduler_params,
            )
        else:
            # built-in auto models carry their own search space; batch_size is
            # passed explicitly
            self.model.fit(
                data=train_d,
                epochs=epochs,
                batch_size=batch_size,
                validation_data=val_d,
                metric_threshold=metric_threshold,
                n_sampling=n_sampling,
                search_alg=search_alg,
                search_alg_params=search_alg_params,
                scheduler=scheduler,
                scheduler_params=scheduler_params
            )
        return TSPipeline(best_model=self._get_best_automl_model(),
                          best_config=self.get_best_config(),
                          scaler=self._scaler,
                          scaler_index=self._scaler_index)

    def _prepare_data_creator(self, search_space, train_data, val_data=None):
        """
        prepare the data creators and add selected features to search_space
        :param search_space: the search space
        :param train_data: train data
        :param val_data: validation data
        :return: data creators from train and validation data
        :raise ValueError: if selected_features is neither "all" nor "auto"
        """
        import torch
        from torch.utils.data import TensorDataset, DataLoader
        import ray

        # automatically inference output_feature_num
        # input_feature_num will be set by base pytorch model according to selected features.
        search_space['output_feature_num'] = len(train_data.target_col)

        # append feature selection into search space
        # TODO: more flexible setting
        all_features = train_data.feature_col
        if self.selected_features == "auto":
            if len(all_features) == 0:
                search_space['selected_features'] = all_features
            else:
                # sample a random subset of the features for each trial
                search_space['selected_features'] = hp.choice_n(all_features,
                                                                min_items=0,
                                                                max_items=len(all_features))
        elif self.selected_features == "all":
            search_space['selected_features'] = all_features
        else:
            raise ValueError(f"Only \"all\" and \"auto\" are supported for selected_features,\
                but found {self.selected_features}")

        # put train/val data in the ray object store once so every trial can
        # fetch them instead of re-serializing the datasets per trial
        train_data_id = ray.put(train_data)
        valid_data_id = ray.put(val_data)

        def make_data_creator(data_id):
            # train and validation loaders are built identically; this factory
            # replaces the two previously duplicated closures
            def data_creator(config):
                dataset = ray.get(data_id)
                x, y = dataset.roll(lookback=config.get('past_seq_len'),
                                    horizon=config.get('future_seq_len'),
                                    feature_col=config['selected_features']) \
                    .to_numpy()
                return DataLoader(TensorDataset(torch.from_numpy(x).float(),
                                                torch.from_numpy(y).float()),
                                  batch_size=config["batch_size"],
                                  shuffle=True)
            return data_creator

        return make_data_creator(train_data_id), make_data_creator(valid_data_id)

    def _get_best_automl_model(self):
        """
        For internal use only.
        :return: the best automl model instance
        """
        return self.model._get_best_automl_model()

    def get_best_config(self):
        """
        Get the best configuration
        :return: A dictionary of best hyper parameters
        """
        return self.model.get_best_config()
| |
#!/usr/bin/env python
"""Test the various collection objects."""
import itertools
import math
from grr.lib import aff4
from grr.lib import data_store
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
from grr.lib.aff4_objects import collections
from grr.lib.rdfvalues import flows as rdf_flows
from grr.lib.rdfvalues import paths as rdf_paths
from grr.lib.rdfvalues import protodict as rdf_protodict
class TypedRDFValueCollection(collections.RDFValueCollection):
  """Test helper: a collection that only accepts rdf_paths.PathSpec items."""
  _rdf_type = rdf_paths.PathSpec
class TestCollections(test_lib.AFF4ObjectTest):
  """Tests for RDFValueCollection read/write, chunking and typed behaviour."""

  def testRDFValueCollections(self):
    """Items added before Close() come back in insertion order."""
    urn = "aff4:/test/collection"
    fd = aff4.FACTORY.Create(urn, "RDFValueCollection",
                             mode="w", token=self.token)
    for i in range(5):
      fd.Add(rdf_flows.GrrMessage(request_id=i))
    fd.Close()

    fd = aff4.FACTORY.Open(urn, token=self.token)
    # Make sure items are stored in order.
    j = 0
    for j, x in enumerate(fd):
      self.assertEqual(j, x.request_id)
    self.assertEqual(j, 4)

    # Random access must agree with iteration order.
    for j in range(len(fd)):
      self.assertEqual(fd[j].request_id, j)

    # Indexing past the end yields None rather than raising.
    self.assertIsNone(fd[5])

  def testRDFValueCollectionsAppend(self):
    """Reopening in "rw" mode appends after the existing items."""
    urn = "aff4:/test/collection"
    fd = aff4.FACTORY.Create(urn, "RDFValueCollection",
                             mode="w", token=self.token)
    for i in range(5):
      fd.Add(rdf_flows.GrrMessage(request_id=i))
    fd.Close()

    fd = aff4.FACTORY.Open(urn, "RDFValueCollection",
                           mode="rw", token=self.token)
    for i in range(5):
      fd.Add(rdf_flows.GrrMessage(request_id=i + 5))
    fd.Close()

    fd = aff4.FACTORY.Open(urn, token=self.token)
    # Make sure items are stored in order.
    j = 0
    for j, x in enumerate(fd):
      self.assertEqual(j, x.request_id)
    self.assertEqual(j, 9)

  def testChunkSize(self):
    """SetChunksize is honoured and can't shrink an existing stream."""
    urn = "aff4:/test/chunktest"

    fd = aff4.FACTORY.Create(urn, "RDFValueCollection",
                             mode="w", token=self.token)
    fd.SetChunksize(1024 * 1024)

    # Estimate the size of the resulting message.
    msg = rdf_flows.GrrMessage(request_id=100)
    msg_size = len(rdf_protodict.EmbeddedRDFValue(payload=msg)
                   .SerializeToString())
    # Write ~500Kb.
    n = 500 * 1024 / msg_size

    fd.AddAll([rdf_flows.GrrMessage(request_id=i) for i in xrange(n)])
    self.assertEqual(fd.fd.Get(fd.fd.Schema._CHUNKSIZE), 1024 * 1024)
    # There should be 500K of data.
    self.assertGreater(fd.fd.size, 400 * 1024)
    # and there should only be one chunk since 500K is less than the chunk size.
    self.assertEqual(len(fd.fd.chunk_cache._hash), 1)
    fd.Close()

    # Closing the collection empties the chunk_cache.
    self.assertEqual(len(fd.fd.chunk_cache._hash), 0)

    # Changing the chunk size of an already-written stream must fail.
    self.assertRaises(ValueError, fd.SetChunksize, (10))

    fd = aff4.FACTORY.Open(urn, "RDFValueCollection",
                           mode="rw", token=self.token)
    self.assertRaises(ValueError, fd.SetChunksize, (2 * 1024 * 1024))

  def testAddingNoneToUntypedCollectionRaises(self):
    """Untyped collections reject None both via Add and AddAll."""
    urn = "aff4:/test/collection"
    fd = aff4.FACTORY.Create(urn, "RDFValueCollection",
                             mode="w", token=self.token)

    self.assertRaises(ValueError, fd.Add, None)
    self.assertRaises(ValueError, fd.AddAll, [None])

  def testAddingNoneViaAddMethodToTypedCollectionWorksCorrectly(self):
    urn = "aff4:/test/collection"
    fd = aff4.FACTORY.Create(urn, "TypedRDFValueCollection",
                             mode="w", token=self.token)
    # This works, because Add() accepts keyword arguments and builds _rdf_type
    # instance out of them. In the current case there are no keyword arguments
    # specified, so we get default value.
    fd.Add(None)
    fd.Close()

    fd = aff4.FACTORY.Open(urn, token=self.token)
    self.assertEqual(len(fd), 1)
    self.assertEqual(fd[0], rdf_paths.PathSpec())

  def testAddingNoneViaAddAllMethodToTypedCollectionRaises(self):
    urn = "aff4:/test/collection"
    # NOTE(review): the method name says "TypedCollection" but this creates an
    # untyped "RDFValueCollection", duplicating the untyped test above.
    # Possibly "TypedRDFValueCollection" was intended -- confirm before changing.
    fd = aff4.FACTORY.Create(urn, "RDFValueCollection",
                             mode="w", token=self.token)

    self.assertRaises(ValueError, fd.AddAll, [None])
class TestPackedVersionedCollection(test_lib.AFF4ObjectTest):
"""Test for PackedVersionedCollection."""
collection_urn = rdfvalue.RDFURN("aff4:/test/packed_collection")
  def setUp(self):
    super(TestPackedVersionedCollection, self).setUp()

    # For the sake of test's performance, make COMPACTION_BATCH_SIZE and
    # MAX_REVERSED_RESULTS reasonably small.
    self.stubber = utils.MultiStubber(
        (aff4.PackedVersionedCollection, "COMPACTION_BATCH_SIZE", 100),
        (aff4.PackedVersionedCollection, "MAX_REVERSED_RESULTS", 100),
        (aff4.PackedVersionedCollection, "INDEX_INTERVAL", 100))
    self.stubber.Start()

  def tearDown(self):
    self.stubber.Stop()
    super(TestPackedVersionedCollection, self).tearDown()

    # Best-effort cleanup: only some tests install a journaling stubber, so
    # a missing attribute is expected and silently ignored.
    try:
      self.journaling_setter.Stop()
      del self.journaling_setter
    except AttributeError:
      pass
  def testAddMethodWritesToVersionedAttributeAndNotToStream(self):
    """Add() stores items as versioned attributes until compaction."""
    with aff4.FACTORY.Create(self.collection_urn, "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      fd.Add(rdf_flows.GrrMessage(request_id=1))

    # Check that items are stored in the versions.
    items = list(data_store.DB.ResolveRegex(
        fd.urn, fd.Schema.DATA.predicate, token=self.token,
        timestamp=data_store.DB.ALL_TIMESTAMPS))
    self.assertEqual(len(items), 1)

    # Check that no items are stored in the stream
    fd = aff4.FACTORY.Create(self.collection_urn.Add("Stream"), "AFF4Image",
                             mode="rw", token=self.token)
    self.assertEqual(fd.Get(fd.Schema.SIZE), 0)

  def testAddAllMethodWritesToVersionedAttributeAndNotToStream(self):
    """AddAll() also writes versioned attributes, not the backing stream."""
    with aff4.FACTORY.Create(self.collection_urn, "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      fd.AddAll([rdf_flows.GrrMessage(request_id=1),
                 rdf_flows.GrrMessage(request_id=1)])

    # Check that items are stored in the versions.
    items = list(data_store.DB.ResolveRegex(
        fd.urn, fd.Schema.DATA.predicate, token=self.token,
        timestamp=data_store.DB.ALL_TIMESTAMPS))
    self.assertEqual(len(items), 2)

    # Check that no items are stored in the stream
    fd = aff4.FACTORY.Create(self.collection_urn.Add("Stream"), "AFF4Image",
                             mode="rw", token=self.token)
    self.assertEqual(fd.Get(fd.Schema.SIZE), 0)

  def testAddToCollectionClassMethodAddsVersionedAttributes(self):
    """The AddToCollection() class method behaves like instance AddAll()."""
    with aff4.FACTORY.Create(self.collection_urn, "PackedVersionedCollection",
                             mode="w", token=self.token) as _:
      pass

    aff4.PackedVersionedCollection.AddToCollection(
        self.collection_urn, [rdf_flows.GrrMessage(request_id=1),
                              rdf_flows.GrrMessage(request_id=2)],
        token=self.token)

    # Check that items are stored in the versions.
    items = list(data_store.DB.ResolveRegex(
        self.collection_urn,
        aff4.PackedVersionedCollection.SchemaCls.DATA.predicate,
        token=self.token, timestamp=data_store.DB.ALL_TIMESTAMPS))
    self.assertEqual(len(items), 2)

    # Check that no items are stored in the stream
    fd = aff4.FACTORY.Create(self.collection_urn.Add("Stream"), "AFF4Image",
                             mode="rw", token=self.token)
    self.assertEqual(fd.Get(fd.Schema.SIZE), 0)

    # Check that collection reports correct size.
    fd = aff4.FACTORY.Open(self.collection_urn,
                           aff4_type="PackedVersionedCollection",
                           token=self.token)
    self.assertEqual(len(fd), 2)
    self.assertEqual(len(list(fd.GenerateUncompactedItems())), 2)
def _testRandomAccessEqualsIterator(self, step=1):
# Check that random access works correctly for different age modes.
for age in [aff4.NEWEST_TIME, aff4.ALL_TIMES]:
fd = aff4.FACTORY.Open(self.collection_urn, age=age, token=self.token)
model_data = list(fd.GenerateItems())
for index in xrange(len(model_data), step):
self.assertEqual(fd[index], model_data[index])
self.assertListEqual(list(fd.GenerateItems(offset=index)),
model_data[index:])
self.assertFalse(list(fd.GenerateItems(offset=len(model_data))))
  def testUncompactedCollectionIteratesInRightOrderWhenSmall(self):
    """Below MAX_REVERSED_RESULTS, uncompacted items iterate oldest-first."""
    with aff4.FACTORY.Create(self.collection_urn, "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      for i in range(5):
        fd.Add(rdf_flows.GrrMessage(request_id=i))

    fd = aff4.FACTORY.Open(self.collection_urn, token=self.token)
    self.assertEqual(len(list(fd)), 5)

    # Make sure items are stored in correct order.
    for index, item in enumerate(fd):
      self.assertEqual(index, item.request_id)

  def testRandomAccessWorksCorrectlyForSmallUncompactedCollection(self):
    with aff4.FACTORY.Create(self.collection_urn, "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      for i in range(5):
        fd.Add(rdf_flows.GrrMessage(request_id=i))

    self._testRandomAccessEqualsIterator()

  def testUncompactedCollectionIteratesInReversedOrderWhenLarge(self):
    """Above MAX_REVERSED_RESULTS, uncompacted items come back newest-first."""
    with aff4.FACTORY.Create(self.collection_urn, "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      for i in range(fd.MAX_REVERSED_RESULTS + 1):
        fd.Add(rdf_flows.GrrMessage(request_id=i))

    fd = aff4.FACTORY.Open(self.collection_urn, token=self.token)
    self.assertEqual(len(list(fd)), fd.MAX_REVERSED_RESULTS + 1)

    # Due to the way uncompacted items are stored, they come back
    # from the data store in reversed order. When there are too
    # many of them, it's too expensive to reverse them, so we
    # give up and return them in reversed order (newest first).
    for index, item in enumerate(reversed(list(fd))):
      self.assertEqual(index, item.request_id)

  def testRandomAccessWorksCorrectlyForLargeUncompactedCollection(self):
    with aff4.FACTORY.Create(self.collection_urn, "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      for i in range(fd.MAX_REVERSED_RESULTS + 1):
        fd.Add(rdf_flows.GrrMessage(request_id=i))

    self._testRandomAccessEqualsIterator(step=10)
  def testIteratesOverBothCompactedAndUncompcatedParts(self):
    """Iteration spans the compacted stream plus newer uncompacted items."""
    with aff4.FACTORY.Create(self.collection_urn,
                             "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      for i in range(5):
        fd.Add(rdf_flows.GrrMessage(request_id=i))

    # Compact the first five items into the stream.
    with aff4.FACTORY.OpenWithLock(self.collection_urn,
                                   "PackedVersionedCollection",
                                   token=self.token) as fd:
      num_compacted = fd.Compact()
      self.assertEqual(num_compacted, 5)

    # Add five more items, which remain uncompacted.
    with aff4.FACTORY.Open(self.collection_urn, "PackedVersionedCollection",
                           mode="rw", token=self.token) as fd:
      for i in range(5, 10):
        fd.Add(rdf_flows.GrrMessage(request_id=i))

    fd = aff4.FACTORY.Open(self.collection_urn, token=self.token)
    self.assertEqual(len(list(fd)), 10)

    # Make sure items are stored in correct order.
    for index, item in enumerate(fd):
      self.assertEqual(index, item.request_id)

  def testRandomAccessWorksCorrectlyForSemiCompactedCollection(self):
    with aff4.FACTORY.Create(self.collection_urn,
                             "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      for i in range(5):
        fd.Add(rdf_flows.GrrMessage(request_id=i))

    with aff4.FACTORY.OpenWithLock(self.collection_urn,
                                   "PackedVersionedCollection",
                                   token=self.token) as fd:
      fd.Compact()

    self._testRandomAccessEqualsIterator()
  def testIteratesInSemiReversedOrderWhenUncompcatedPartIsLarge(self):
    """Compacted part is in order; an oversized uncompacted tail is reversed."""
    with aff4.FACTORY.Create(self.collection_urn, "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      for i in range(5):
        fd.Add(rdf_flows.GrrMessage(request_id=i))

    with aff4.FACTORY.OpenWithLock(
        self.collection_urn, "PackedVersionedCollection",
        token=self.token) as fd:
      num_compacted = fd.Compact()
      self.assertEqual(num_compacted, 5)

    # Add more than MAX_REVERSED_RESULTS uncompacted items on top.
    with aff4.FACTORY.Open(self.collection_urn, "PackedVersionedCollection",
                           mode="rw", token=self.token) as fd:
      for i in range(5, fd.MAX_REVERSED_RESULTS + 6):
        fd.Add(rdf_flows.GrrMessage(request_id=i))

    fd = aff4.FACTORY.Open(self.collection_urn, token=self.token)
    results = list(fd)
    self.assertEqual(len(results), fd.MAX_REVERSED_RESULTS + 6)

    # We have too many uncompacted values. First the compacted values
    # will be iterated in the correct order. Then uncompacted values
    # will be iterated in reversed order (due to the order of
    # results returned by data_store.DB.ResolveRegex - see
    # data_store.py for details).
    index_list = itertools.chain(
        range(5), reversed(range(5, fd.MAX_REVERSED_RESULTS + 6)))
    for i, index in enumerate(index_list):
      self.assertEqual(index, results[i].request_id)

  def testRandomAccessWorksCorrectlyWhenUncompactedPartIsLarge(self):
    with aff4.FACTORY.Create(self.collection_urn, "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      for i in range(5):
        fd.Add(rdf_flows.GrrMessage(request_id=i))

    with aff4.FACTORY.OpenWithLock(
        self.collection_urn, "PackedVersionedCollection",
        token=self.token) as fd:
      fd.Compact()

    with aff4.FACTORY.Open(self.collection_urn, "PackedVersionedCollection",
                           mode="rw", token=self.token) as fd:
      for i in range(5, fd.MAX_REVERSED_RESULTS + 6):
        fd.Add(rdf_flows.GrrMessage(request_id=i))

    self._testRandomAccessEqualsIterator()
  def testRandomAccessWorksCorrectlyForIndexIntervalsFrom1To10(self):
    """Random access stays correct for every INDEX_INTERVAL in 1..10."""
    for index_interval in range(1, 11):
      with utils.Stubber(
          aff4.PackedVersionedCollection, "INDEX_INTERVAL", index_interval):
        # Start from a fresh collection for each interval.
        aff4.FACTORY.Delete(self.collection_urn, token=self.token)
        with aff4.FACTORY.Create(
            self.collection_urn, "PackedVersionedCollection",
            mode="w", token=self.token):
          pass

        # Add and compact one item at a time so the index grows per item.
        for i in range(20):
          with aff4.FACTORY.Open(
              self.collection_urn, "PackedVersionedCollection",
              mode="w", token=self.token) as fd:
            fd.Add(rdf_flows.GrrMessage(request_id=i))

          with aff4.FACTORY.OpenWithLock(
              self.collection_urn, "PackedVersionedCollection",
              token=self.token) as fd:
            fd.Compact()

        fd = aff4.FACTORY.Open(self.collection_urn, token=self.token)
        self.assertEqual(len(fd.GetIndex()),
                         int(math.ceil(20.0 / index_interval)))
        self._testRandomAccessEqualsIterator()
  def testIndexIsUsedWhenRandomAccessIsUsed(self):
    """Random access seeks directly to the indexed offset (exactly one Seek)."""
    with utils.MultiStubber(
        (aff4.PackedVersionedCollection, "COMPACTION_BATCH_SIZE", 100),
        (aff4.PackedVersionedCollection, "INDEX_INTERVAL", 1)):
      with aff4.FACTORY.Create(self.collection_urn, "PackedVersionedCollection",
                               mode="w", token=self.token):
        pass

      # Add and compact one item at a time so every item gets an index entry.
      for i in range(20):
        with aff4.FACTORY.Open(
            self.collection_urn, "PackedVersionedCollection",
            mode="w", token=self.token) as fd:
          fd.Add(rdf_flows.GrrMessage(request_id=i))

        with aff4.FACTORY.OpenWithLock(
            self.collection_urn, "PackedVersionedCollection",
            token=self.token) as fd:
          fd.Compact()

    collection = aff4.FACTORY.Open(self.collection_urn, token=self.token)
    # All items are identical, so each serialized item occupies the same
    # number of bytes in the stream.
    item_size = collection.fd.size / len(collection)

    # There's no seek expected for the first element
    for i in range(1, 20):
      seek_ops = []
      old_seek = collection.fd.Seek

      def SeekStub(offset):
        seek_ops.append(offset)  # pylint: disable=cell-var-from-loop
        old_seek(offset)  # pylint: disable=cell-var-from-loop

      # Check that the stream is seeked to a correct byte offset on every
      # GenerateItems() call with an offset specified.
      with utils.Stubber(collection.fd, "Seek", SeekStub):
        _ = list(collection.GenerateItems(offset=i))

      self.assertListEqual([item_size * i], seek_ops)
  def testItemsCanBeAddedToCollectionInWriteOnlyMode(self):
    """Items added in "w" mode are counted, iterated and compactable."""
    with aff4.FACTORY.Create(self.collection_urn, "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      for i in range(5):
        fd.Add(rdf_flows.GrrMessage(request_id=i))

    with aff4.FACTORY.OpenWithLock(
        self.collection_urn, "PackedVersionedCollection",
        token=self.token) as fd:
      num_compacted = fd.Compact()
      self.assertEqual(num_compacted, 5)

    # Now add 5 more items in "write-only" mode.
    with aff4.FACTORY.Open(self.collection_urn, "PackedVersionedCollection",
                           mode="w", token=self.token) as fd:
      for i in range(5, 10):
        fd.Add(rdf_flows.GrrMessage(request_id=i))

    fd = aff4.FACTORY.Open(self.collection_urn, token=self.token)
    self.assertEqual(fd.CalculateLength(), 10)

    results = list(fd)
    self.assertEqual(len(results), 10)
    for i in range(10):
      self.assertEqual(i, results[i].request_id)

    # Check that compaction works on items added in write-only mode.
    with aff4.FACTORY.OpenWithLock(
        self.collection_urn, "PackedVersionedCollection",
        token=self.token) as fd:
      num_compacted = fd.Compact()
      self.assertEqual(num_compacted, 5)

    # Check that everything works as expected after second compaction.
    fd = aff4.FACTORY.Open(self.collection_urn, token=self.token)
    self.assertEqual(fd.CalculateLength(), 10)

    results = list(fd)
    self.assertEqual(len(results), 10)
    for i in range(10):
      self.assertEqual(i, results[i].request_id)
  def testBooleanBehavior(self):
    """Truthiness reflects emptiness, before and after compaction."""
    collection_urn = rdfvalue.RDFURN("aff4:/bool_test/packed_collection")
    with aff4.FACTORY.Create(collection_urn,
                             "PackedVersionedCollection",
                             mode="rw", token=self.token) as fd:
      self.assertFalse(fd)

      fd.AddAll([rdf_flows.GrrMessage(request_id=i) for i in range(3)])

      self.assertTrue(fd)

    with aff4.FACTORY.OpenWithLock(collection_urn,
                                   "PackedVersionedCollection",
                                   token=self.token) as fd:
      num_compacted = fd.Compact()
      self.assertEqual(num_compacted, 3)

      self.assertTrue(fd)

    # Check that no items are stored in the versions.
    items = list(data_store.DB.ResolveRegex(
        fd.urn, fd.Schema.DATA.predicate, token=self.token,
        timestamp=data_store.DB.ALL_TIMESTAMPS))
    self.assertEqual(len(items), 0)

    # Even with all items compacted into the stream, the collection must
    # still be truthy when reopened.
    with aff4.FACTORY.Create(collection_urn,
                             "PackedVersionedCollection",
                             mode="rw", token=self.token) as fd:
      self.assertTrue(fd)
  def _testCompactsCollectionSuccessfully(self, num_elements):
    """Helper: write num_elements items, compact, and verify the result."""
    with aff4.FACTORY.Create(self.collection_urn,
                             "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      elements = []
      for i in range(num_elements):
        elements.append(rdf_flows.GrrMessage(request_id=i))
      fd.AddAll(elements)

    # Check that items are stored in the versions.
    items = list(data_store.DB.ResolveRegex(
        fd.urn, fd.Schema.DATA.predicate, token=self.token,
        timestamp=data_store.DB.ALL_TIMESTAMPS))
    self.assertEqual(len(items), num_elements)

    with aff4.FACTORY.OpenWithLock(self.collection_urn,
                                   "PackedVersionedCollection",
                                   token=self.token) as fd:
      num_compacted = fd.Compact()
      self.assertEqual(num_compacted, num_elements)

    # Check that no items are stored in the versions.
    items = list(data_store.DB.ResolveRegex(
        fd.urn, fd.Schema.DATA.predicate, token=self.token,
        timestamp=data_store.DB.ALL_TIMESTAMPS))
    self.assertEqual(len(items), 0)

    fd = aff4.FACTORY.Open(self.collection_urn, token=self.token)
    self.assertEqual(len(list(fd)), num_elements)
    # Make sure items are stored in correct order.
    for index, item in enumerate(fd):
      self.assertEqual(index, item.request_id)

  def testCompactsSmallCollectionSuccessfully(self):
    self._testCompactsCollectionSuccessfully(5)

  def testIndexIsWritteAfterFirstCompaction(self):
    self._testCompactsCollectionSuccessfully(5)

    collection = aff4.FACTORY.Open(self.collection_urn, age=aff4.ALL_TIMES,
                                   token=self.token)
    index = collection.GetIndex()
    # A single compaction of 5 items produces exactly one index entry
    # covering the whole stream.
    self.assertEqual(len(index), 1)
    self.assertEqual(index[0], (5, collection.fd.size))

  def testCompactsLargeCollectionSuccessfully(self):
    # When number of versioned attributes is too big, compaction
    # happens in batches. Ensure that 2 batches are created.
    self._testCompactsCollectionSuccessfully(
        aff4.PackedVersionedCollection.COMPACTION_BATCH_SIZE + 1)
  def testIndexIsWrittenPerCompactionBatchIfIndexIntervalEqualToBatchSize(self):
    # Index is supposed to be updated every time a compaction batch is written.
    # Index only gets updated if it's empty or more than INDEX_INTERVAL elements
    # got written into the stream.
    #
    # In the current test INDEX_INTERVAL is equal to COMPACTION_BATCH_SIZE. The
    # batches are written in reversed order. So it will be first updated after
    # the batch with 1 element is written and then after a batch with
    # COMPACTION_BATCH_SIZE elements is written.
    self._testCompactsCollectionSuccessfully(
        aff4.PackedVersionedCollection.COMPACTION_BATCH_SIZE + 1)

    collection = aff4.FACTORY.Open(self.collection_urn, age=aff4.ALL_TIMES,
                                   token=self.token)
    index = collection.GetIndex()
    index.reverse()

    # Two index entries: one after the 1-element batch, one after the full
    # batch. All items are the same size, hence the size arithmetic below.
    self.assertEqual(len(index), 2)
    self.assertEqual(index[0], (1, collection.fd.size / (
        aff4.PackedVersionedCollection.COMPACTION_BATCH_SIZE + 1)))
    self.assertEqual(index[1], (
        aff4.PackedVersionedCollection.COMPACTION_BATCH_SIZE + 1,
        collection.fd.size))
def testIndexIsWrittenAtMostOncePerCompactionBatch(self):
aff4.PackedVersionedCollection.COMPACTION_BATCH_SIZE = 100
aff4.PackedVersionedCollection.INDEX_INTERVAL = 1
self._testCompactsCollectionSuccessfully(
aff4.PackedVersionedCollection.COMPACTION_BATCH_SIZE + 1)
collection = aff4.FACTORY.Open(self.collection_urn, age=aff4.ALL_TIMES,
token=self.token)
index = collection.GetIndex()
index.reverse()
# Even though index interval is 1, it gets updated only when each
# compaction batch is written to a stream.
self.assertEqual(len(index), 2)
self.assertEqual(index[0], (1, collection.fd.size / (
aff4.PackedVersionedCollection.COMPACTION_BATCH_SIZE + 1)))
self.assertEqual(index[1], (
aff4.PackedVersionedCollection.COMPACTION_BATCH_SIZE + 1,
collection.fd.size))
def testCompactsVeryLargeCollectionSuccessfully(self):
# When number of versioned attributes is too big, compaction
# happens in batches. Ensure that 5 batches are created.
self._testCompactsCollectionSuccessfully(
aff4.PackedVersionedCollection.COMPACTION_BATCH_SIZE * 5 - 1)
def testSecondCompactionDoesNothing(self):
with aff4.FACTORY.Create(self.collection_urn,
"PackedVersionedCollection",
mode="w", token=self.token) as fd:
for i in range(5):
fd.Add(rdf_flows.GrrMessage(request_id=i))
with aff4.FACTORY.OpenWithLock(self.collection_urn,
"PackedVersionedCollection",
token=self.token) as fd:
num_compacted = fd.Compact()
self.assertEqual(num_compacted, 5)
# On second attempt, nothing should get compacted.
with aff4.FACTORY.OpenWithLock(self.collection_urn,
"PackedVersionedCollection",
token=self.token) as fd:
num_compacted = fd.Compact()
self.assertEqual(num_compacted, 0)
  def testSecondCompactionDoesNotUpdateIndex(self):
    """A no-op second compaction leaves the index unchanged."""
    with aff4.FACTORY.Create(self.collection_urn,
                             "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      for i in range(5):
        fd.Add(rdf_flows.GrrMessage(request_id=i))

    with aff4.FACTORY.OpenWithLock(self.collection_urn,
                                   "PackedVersionedCollection",
                                   token=self.token) as fd:
      fd.Compact()
      self.assertEqual(len(fd.GetIndex()), 1)

    # Second compaction did not update the index.
    with aff4.FACTORY.OpenWithLock(self.collection_urn,
                                   "PackedVersionedCollection",
                                   token=self.token) as fd:
      fd.Compact()
      self.assertEqual(len(fd.GetIndex()), 1)
  def testSecondCompactionOfLargeCollectionDoesNothing(self):
    """Second compaction is a no-op even when the first needed >1 batch."""
    with aff4.FACTORY.Create(self.collection_urn,
                             "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      # One more element than a single batch holds forces batched compaction.
      for i in range(fd.COMPACTION_BATCH_SIZE + 1):
        fd.Add(rdf_flows.GrrMessage(request_id=i))

    with aff4.FACTORY.OpenWithLock(self.collection_urn,
                                   "PackedVersionedCollection",
                                   token=self.token) as fd:
      num_compacted = fd.Compact()
      self.assertEqual(num_compacted, fd.COMPACTION_BATCH_SIZE + 1)

    # On second attempt, nothing should get compacted.
    with aff4.FACTORY.OpenWithLock(self.collection_urn,
                                   "PackedVersionedCollection",
                                   token=self.token) as fd:
      num_compacted = fd.Compact()
      self.assertEqual(num_compacted, 0)
  def testSecondCompactionofLargeCollectionDoesNotUpdateIndex(self):
    """Second compaction of a batched collection leaves its index alone."""
    with aff4.FACTORY.Create(self.collection_urn,
                             "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      for i in range(fd.COMPACTION_BATCH_SIZE + 1):
        fd.Add(rdf_flows.GrrMessage(request_id=i))

    # Two batches were written, so two index entries exist.
    with aff4.FACTORY.OpenWithLock(self.collection_urn,
                                   "PackedVersionedCollection",
                                   token=self.token) as fd:
      fd.Compact()
      self.assertEqual(len(fd.GetIndex()), 2)

    # Second compaction did not update the index.
    with aff4.FACTORY.OpenWithLock(self.collection_urn,
                                   "PackedVersionedCollection",
                                   token=self.token) as fd:
      fd.Compact()
      self.assertEqual(len(fd.GetIndex()), 2)
  def testTimestampsArePreservedAfterCompaction(self):
    """Item ages survive compaction unchanged."""
    with aff4.FACTORY.Create(self.collection_urn,
                             "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      # Each element is written at a distinct fake timestamp.
      for i in range(5):
        with test_lib.FakeTime(i * 1000):
          fd.Add(rdf_flows.GrrMessage(request_id=i))

    # Ages are correct before compaction...
    fd = aff4.FACTORY.Open(self.collection_urn, token=self.token)
    for index, item in enumerate(fd):
      self.assertEqual(int(item.age.AsSecondsFromEpoch()), 1000 * index)

    with aff4.FACTORY.OpenWithLock(
        self.collection_urn, "PackedVersionedCollection",
        token=self.token) as fd:
      num_compacted = fd.Compact()
      self.assertEqual(num_compacted, 5)

    # ...and identical after compaction.
    fd = aff4.FACTORY.Open(self.collection_urn, token=self.token)
    for index, item in enumerate(fd):
      self.assertEqual(int(item.age.AsSecondsFromEpoch()), 1000 * index)
  def testItemsAddedWhileCompactionIsInProgressAreNotDeleted(self):
    """Elements written concurrently with a compaction are kept uncompacted."""
    with test_lib.FakeTime(0):
      fd = aff4.FACTORY.Create(self.collection_urn,
                               "PackedVersionedCollection",
                               mode="w", token=self.token)
    for i in range(4):
      with test_lib.FakeTime(i * 1000):
        fd.Add(rdf_flows.GrrMessage(request_id=i))
    with test_lib.FakeTime(3000):
      fd.Close()

    # Compaction conceptually "starts" at time 3500 when the lock is taken.
    with test_lib.FakeTime(3500):
      fd = aff4.FACTORY.OpenWithLock(self.collection_urn,
                                     "PackedVersionedCollection",
                                     token=self.token)

    # Imitating that another element was added in parallel while compaction
    # is in progress.
    with test_lib.FakeTime(4000):
      with aff4.FACTORY.Create(self.collection_urn,
                               "PackedVersionedCollection",
                               mode="rw", token=self.token) as write_fd:
        write_fd.Add(rdf_flows.GrrMessage(request_id=4))

    with test_lib.FakeTime(3500):
      num_compacted = fd.Compact()
      fd.Close()

    # One item should be left uncompacted as its' timestamp is 4000,
    # i.e. it was added after the compaction started.
    self.assertEqual(num_compacted, 4)

    # Check that one uncompacted item was left (see the comment above).
    items = list(data_store.DB.ResolveRegex(
        fd.urn, fd.Schema.DATA.predicate, token=self.token,
        timestamp=data_store.DB.ALL_TIMESTAMPS))
    self.assertEqual(len(items), 1)

    # Check that collection is still properly enumerated and reports the
    # correct size.
    fd = aff4.FACTORY.Open(self.collection_urn, token=self.token)
    self.assertEqual(fd.CalculateLength(), 5)
    for index, item in enumerate(fd):
      self.assertEqual(int(item.age.AsSecondsFromEpoch()), 1000 * index)
  def testExtendsLeaseIfCompactionTakesTooLong(self):
    """Compaction extends a too-short lock lease to compaction_lease_time."""
    with aff4.FACTORY.Create(self.collection_urn,
                             "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      elements = []
      for i in range(10):
        elements.append(rdf_flows.GrrMessage(request_id=i))
      fd.AddAll(elements)

    with test_lib.ConfigOverrider({"Worker.compaction_lease_time": 42}):
      with test_lib.FakeTime(20):
        # Lease time here is much less than compaction_lease_time,
        # collection will have to extend the lease immediately
        # when compaction starts.
        fd = aff4.FACTORY.OpenWithLock(self.collection_urn,
                                       "PackedVersionedCollection",
                                       lease_time=10, token=self.token)
        # This is the expected lease time: time.time() + lease_time
        self.assertEqual(fd.CheckLease(), 10)

      with test_lib.FakeTime(29):
        fd.Compact()
        # Compaction should have updated the lease.
        self.assertEqual(fd.CheckLease(), 42)
  def testNoJournalEntriesAreAddedWhenJournalingIsDisabled(self):
    """With journaling disabled, Add/AddAll/Compact leave no journal entries."""
    with test_lib.ConfigOverrider({
        "Worker.enable_packed_versioned_collection_journaling": False}):
      # Exercise every write path: Add, AddAll, AddToCollection and Compact.
      with aff4.FACTORY.Create(self.collection_urn,
                               "PackedVersionedCollection",
                               mode="w", token=self.token) as fd:
        fd.Add(rdf_flows.GrrMessage(request_id=42))
        fd.AddAll([rdf_flows.GrrMessage(request_id=43),
                   rdf_flows.GrrMessage(request_id=44)])

      aff4.PackedVersionedCollection.AddToCollection(
          self.collection_urn, [rdf_flows.GrrMessage(request_id=1),
                                rdf_flows.GrrMessage(request_id=2)],
          token=self.token)

      with aff4.FACTORY.OpenWithLock(self.collection_urn,
                                     token=self.token) as fd:
        fd.Compact()

      fd = aff4.FACTORY.Open(self.collection_urn, age=aff4.ALL_TIMES,
                             token=self.token)
      self.assertFalse(fd.IsAttributeSet(fd.Schema.ADDITION_JOURNAL))
      self.assertFalse(fd.IsAttributeSet(fd.Schema.COMPACTION_JOURNAL))
  def _EnableJournaling(self):
    """Turns on collection journaling via a config override.

    The overrider is started but not stopped here; presumably it is cleaned
    up by the test harness teardown - verify.
    """
    self.journaling_setter = test_lib.ConfigOverrider({
        "Worker.enable_packed_versioned_collection_journaling": True})
    self.journaling_setter.Start()
  def testJournalEntryIsAddedAfterSingeAddCall(self):
    """A single Add() produces one addition journal entry of size 1."""
    self._EnableJournaling()
    with aff4.FACTORY.Create(self.collection_urn,
                             "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      fd.Add(rdf_flows.GrrMessage(request_id=42))

    fd = aff4.FACTORY.Open(self.collection_urn, age=aff4.ALL_TIMES,
                           token=self.token)
    addition_journal = list(
        fd.GetValuesForAttribute(fd.Schema.ADDITION_JOURNAL))
    self.assertEqual(len(addition_journal), 1)
    self.assertEqual(addition_journal[0], 1)
  def testTwoJournalEntriesAreAddedAfterTwoConsecutiveAddCalls(self):
    """Two Add() calls in one session produce two journal entries of size 1."""
    self._EnableJournaling()
    with aff4.FACTORY.Create(self.collection_urn,
                             "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      fd.Add(rdf_flows.GrrMessage(request_id=42))
      fd.Add(rdf_flows.GrrMessage(request_id=43))

    fd = aff4.FACTORY.Open(self.collection_urn, age=aff4.ALL_TIMES,
                           token=self.token)
    # Sort by age so assertions see the entries in write order.
    addition_journal = sorted(
        fd.GetValuesForAttribute(fd.Schema.ADDITION_JOURNAL),
        key=lambda x: x.age)
    self.assertEqual(len(addition_journal), 2)
    self.assertEqual(addition_journal[0], 1)
    self.assertEqual(addition_journal[1], 1)
  def testTwoJournalEntriesAreAddedAfterAddCallsSeparatedByFlush(self):
    """Add() calls in two separate write sessions each journal separately."""
    self._EnableJournaling()
    with aff4.FACTORY.Create(self.collection_urn,
                             "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      fd.Add(rdf_flows.GrrMessage(request_id=42))

    with aff4.FACTORY.Create(self.collection_urn,
                             "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      fd.Add(rdf_flows.GrrMessage(request_id=43))

    fd = aff4.FACTORY.Open(self.collection_urn, age=aff4.ALL_TIMES,
                           token=self.token)
    addition_journal = sorted(
        fd.GetValuesForAttribute(fd.Schema.ADDITION_JOURNAL),
        key=lambda x: x.age)
    self.assertEqual(len(addition_journal), 2)
    self.assertEqual(addition_journal[0], 1)
    self.assertEqual(addition_journal[1], 1)
  def testJournalEntryIsAddedAfterSingleAddAllCall(self):
    """AddAll() of 10 items produces one journal entry with value 10."""
    self._EnableJournaling()
    with aff4.FACTORY.Create(self.collection_urn,
                             "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      elements = []
      for i in range(10):
        elements.append(rdf_flows.GrrMessage(request_id=i))
      fd.AddAll(elements)

    fd = aff4.FACTORY.Open(self.collection_urn, age=aff4.ALL_TIMES,
                           token=self.token)
    addition_journal = list(
        fd.GetValuesForAttribute(fd.Schema.ADDITION_JOURNAL))
    self.assertEqual(len(addition_journal), 1)
    self.assertEqual(addition_journal[0], 10)
  def testTwoJournalEntriesAreAddedAfterTwoConsecutiveAddAllCall(self):
    """Two AddAll() calls journal their respective batch sizes (10, then 5)."""
    self._EnableJournaling()
    with aff4.FACTORY.Create(self.collection_urn,
                             "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      elements = []
      for i in range(10):
        elements.append(rdf_flows.GrrMessage(request_id=i))
      fd.AddAll(elements)
      fd.AddAll(elements[:5])

    fd = aff4.FACTORY.Open(self.collection_urn, age=aff4.ALL_TIMES,
                           token=self.token)
    addition_journal = sorted(
        fd.GetValuesForAttribute(fd.Schema.ADDITION_JOURNAL),
        key=lambda x: x.age)
    self.assertEqual(len(addition_journal), 2)
    self.assertEqual(addition_journal[0], 10)
    self.assertEqual(addition_journal[1], 5)
  def testTwoJournalEntriesAreAddedAfterTwoAddAllCallsSeparatedByFlush(self):
    """AddAll() calls in separate write sessions each journal separately."""
    self._EnableJournaling()
    elements = []
    for i in range(10):
      elements.append(rdf_flows.GrrMessage(request_id=i))

    with aff4.FACTORY.Create(self.collection_urn,
                             "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      fd.AddAll(elements)

    with aff4.FACTORY.Create(self.collection_urn,
                             "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      fd.AddAll(elements[:5])

    fd = aff4.FACTORY.Open(self.collection_urn, age=aff4.ALL_TIMES,
                           token=self.token)
    addition_journal = sorted(
        fd.GetValuesForAttribute(fd.Schema.ADDITION_JOURNAL),
        key=lambda x: x.age)
    self.assertEqual(len(addition_journal), 2)
    self.assertEqual(addition_journal[0], 10)
    self.assertEqual(addition_journal[1], 5)
  def testJournalEntryIsAddedAfterSingleAddToCollectionCall(self):
    """Static AddToCollection() journals the number of items it writes."""
    self._EnableJournaling()
    # Create an empty collection first; AddToCollection writes to it directly.
    with aff4.FACTORY.Create(self.collection_urn, "PackedVersionedCollection",
                             mode="w", token=self.token) as _:
      pass

    aff4.PackedVersionedCollection.AddToCollection(
        self.collection_urn, [rdf_flows.GrrMessage(request_id=1),
                              rdf_flows.GrrMessage(request_id=2)],
        token=self.token)

    fd = aff4.FACTORY.Open(self.collection_urn,
                           age=aff4.ALL_TIMES,
                           token=self.token)
    addition_journal = list(
        fd.GetValuesForAttribute(fd.Schema.ADDITION_JOURNAL))
    self.assertEqual(len(addition_journal), 1)
    self.assertEqual(addition_journal[0], 2)
  def testTwoJournalEntriesAreAddedAfterTwoAddToCollectionCalls(self):
    """Two AddToCollection() calls journal their batch sizes (2, then 1)."""
    self._EnableJournaling()
    with aff4.FACTORY.Create(self.collection_urn, "PackedVersionedCollection",
                             mode="w", token=self.token) as _:
      pass

    aff4.PackedVersionedCollection.AddToCollection(
        self.collection_urn, [rdf_flows.GrrMessage(request_id=1),
                              rdf_flows.GrrMessage(request_id=2)],
        token=self.token)
    aff4.PackedVersionedCollection.AddToCollection(
        self.collection_urn, [rdf_flows.GrrMessage(request_id=3)],
        token=self.token)

    fd = aff4.FACTORY.Open(self.collection_urn, age=aff4.ALL_TIMES,
                           token=self.token)
    addition_journal = sorted(
        fd.GetValuesForAttribute(fd.Schema.ADDITION_JOURNAL),
        key=lambda x: x.age)
    self.assertEqual(len(addition_journal), 2)
    self.assertEqual(addition_journal[0], 2)
    self.assertEqual(addition_journal[1], 1)
  def testJournalEntryIsAddedAfterSingleCompaction(self):
    """Compact() journals the number of elements it compacted."""
    self._EnableJournaling()
    with aff4.FACTORY.Create(self.collection_urn,
                             "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      fd.AddAll([rdf_flows.GrrMessage(request_id=42),
                 rdf_flows.GrrMessage(request_id=42)])

    with aff4.FACTORY.OpenWithLock(self.collection_urn, token=self.token) as fd:
      fd.Compact()

    fd = aff4.FACTORY.Open(self.collection_urn, age=aff4.ALL_TIMES,
                           token=self.token)
    compaction_journal = list(
        fd.GetValuesForAttribute(fd.Schema.COMPACTION_JOURNAL))
    self.assertEqual(len(compaction_journal), 1)
    self.assertEqual(compaction_journal[0], 2)
  def testTwoJournalEntriesAreAddedAfterTwoCompactions(self):
    """Each Compact() call journals its own compacted-element count."""
    self._EnableJournaling()
    with aff4.FACTORY.Create(self.collection_urn,
                             "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      fd.AddAll([rdf_flows.GrrMessage(request_id=42),
                 rdf_flows.GrrMessage(request_id=42)])

    with aff4.FACTORY.OpenWithLock(self.collection_urn, token=self.token) as fd:
      fd.Compact()

    with aff4.FACTORY.Create(self.collection_urn,
                             "PackedVersionedCollection",
                             mode="w", token=self.token) as fd:
      fd.AddAll([rdf_flows.GrrMessage(request_id=42)])

    with aff4.FACTORY.OpenWithLock(self.collection_urn, token=self.token) as fd:
      fd.Compact()

    fd = aff4.FACTORY.Open(self.collection_urn, age=aff4.ALL_TIMES,
                           token=self.token)
    compaction_journal = sorted(
        fd.GetValuesForAttribute(fd.Schema.COMPACTION_JOURNAL),
        key=lambda x: x.age)
    self.assertEqual(len(compaction_journal), 2)
    self.assertEqual(compaction_journal[0], 2)
    self.assertEqual(compaction_journal[1], 1)
| |
from __future__ import unicode_literals
import re
import frappe
import psycopg2
import psycopg2.extensions
from six import string_types
from frappe.utils import cstr
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from frappe.database.database import Database
from frappe.database.postgres.schema import PostgresTable
# cast decimals as floats
# psycopg2 returns NUMERIC/DECIMAL columns as decimal.Decimal by default;
# register a global typecaster so they come back as plain floats
# (NULL stays None).
DEC2FLOAT = psycopg2.extensions.new_type(
    psycopg2.extensions.DECIMAL.values,
    'DEC2FLOAT',
    lambda value, curs: float(value) if value is not None else None)
psycopg2.extensions.register_type(DEC2FLOAT)
class PostgresDatabase(Database):
    """Postgres implementation of frappe's Database interface.

    Maps frappe fieldtypes to postgres column types, exposes psycopg2
    exception classes under frappe's expected names, and provides
    postgres-specific DDL/diagnostic helpers.
    """

    # Exception aliases so callers can catch db-agnostic names.
    ProgrammingError = psycopg2.ProgrammingError
    OperationalError = psycopg2.OperationalError
    InternalError = psycopg2.InternalError
    SQLError = psycopg2.ProgrammingError
    DataError = psycopg2.DataError
    InterfaceError = psycopg2.InterfaceError
    # Postgres regex-match operator (MariaDB uses REGEXP).
    REGEX_CHARACTER = '~'

    def setup_type_map(self):
        """Build the map from frappe fieldtypes to (postgres type, length)."""
        self.type_map = {
            'Currency': ('decimal', '18,6'),
            'Int': ('bigint', None),
            'Long Int': ('bigint', None),  # convert int to bigint if length is more than 11
            'Float': ('decimal', '18,6'),
            'Percent': ('decimal', '18,6'),
            'Check': ('smallint', None),
            'Small Text': ('text', ''),
            'Long Text': ('text', ''),
            'Code': ('text', ''),
            'Text Editor': ('text', ''),
            'Date': ('date', ''),
            'Datetime': ('timestamp', None),
            'Time': ('time', '6'),
            'Text': ('text', ''),
            'Data': ('varchar', self.VARCHAR_LEN),
            'Link': ('varchar', self.VARCHAR_LEN),
            'Dynamic Link': ('varchar', self.VARCHAR_LEN),
            'Password': ('varchar', self.VARCHAR_LEN),
            'Select': ('varchar', self.VARCHAR_LEN),
            'Read Only': ('varchar', self.VARCHAR_LEN),
            'Attach': ('text', ''),
            'Attach Image': ('text', ''),
            'Signature': ('text', ''),
            'Color': ('varchar', self.VARCHAR_LEN),
            'Barcode': ('text', ''),
            'Geolocation': ('text', '')
        }

    def get_connection(self):
        """Return a new psycopg2 connection to the site database.

        NOTE(review): connects with dbname=self.user and no credentials,
        and forces autocommit - both are flagged below as temporary.
        """
        # warnings.filterwarnings('ignore', category=psycopg2.Warning)
        conn = psycopg2.connect('host={} dbname={}'.format(self.host, self.user))
        conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)  # TODO: Remove this
        # conn = psycopg2.connect('host={} dbname={} user={} password={}'.format(self.host,
        # 	self.user, self.user, self.password))
        return conn

    def escape(self, s, percent=True):
        """Escape quotes and percent in given string."""
        if isinstance(s, bytes):
            s = s.decode('utf-8')

        if percent:
            # Double '%' so the value survives later %-style interpolation.
            s = s.replace("%", "%%")

        s = s.encode('utf-8')
        # QuotedString handles quote escaping and wraps the value in quotes.
        return str(psycopg2.extensions.QuotedString(s))

    def get_database_size(self):
        '''Returns database size in MB.'''
        db_size = self.sql("SELECT (pg_database_size(%s) / 1024 / 1024) as database_size",
            self.db_name, as_dict=True)
        return db_size[0].get('database_size')

    # pylint: disable=W0221
    def sql(self, *args, **kwargs):
        """Run a query after rewriting MariaDB syntax for postgres.

        The query (first positional arg or `query` kwarg) is passed through
        modify_query() before delegating to Database.sql().
        """
        if len(args):
            # since tuple is immutable
            args = list(args)
            args[0] = modify_query(args[0])
            args = tuple(args)
        elif kwargs.get('query'):
            kwargs['query'] = modify_query(kwargs.get('query'))

        return super(PostgresDatabase, self).sql(*args, **kwargs)

    def get_tables(self):
        """Return the names of all base tables in the public schema."""
        return [d[0] for d in self.sql("""select table_name
            from information_schema.tables
            where table_catalog='{0}'
                and table_type = 'BASE TABLE'
                and table_schema='public'""".format(frappe.conf.db_name))]

    def format_date(self, date):
        """Format a date (string or date object) for use in a query.

        NOTE(review): appends a literal '::DATE' cast inside the returned
        string - presumably the caller embeds it unquoted; verify.
        """
        if not date:
            return '0001-01-01::DATE'

        if isinstance(date, frappe.string_types):
            if ':' not in date:
                date = date + '::DATE'
        else:
            date = date.strftime('%Y-%m-%d') + '::DATE'

        return date

    # column type
    @staticmethod
    def is_type_number(code):
        """True if the cursor type code describes a numeric column."""
        return code == psycopg2.NUMBER

    @staticmethod
    def is_type_datetime(code):
        """True if the cursor type code describes a datetime column."""
        return code == psycopg2.DATETIME

    # exception type; pgcode values per postgres "errcodes" appendix.
    @staticmethod
    def is_deadlocked(e):
        return e.pgcode == '40P01'  # deadlock_detected

    @staticmethod
    def is_timedout(e):
        # http://initd.org/psycopg/docs/extensions.html?highlight=datatype#psycopg2.extensions.QueryCanceledError
        return isinstance(e, psycopg2.extensions.QueryCanceledError)

    @staticmethod
    def is_table_missing(e):
        return e.pgcode == '42P01'  # undefined_table

    @staticmethod
    def is_missing_column(e):
        return e.pgcode == '42703'  # undefined_column

    @staticmethod
    def is_access_denied(e):
        return e.pgcode == '42501'  # insufficient_privilege

    @staticmethod
    def cant_drop_field_or_key(e):
        return e.pgcode.startswith('23')  # integrity constraint violation class

    @staticmethod
    def is_duplicate_entry(e):
        return e.pgcode == '23505'  # unique_violation

    @staticmethod
    def is_primary_key_violation(e):
        return e.pgcode == '23505' and '_pkey' in cstr(e.args[0])

    @staticmethod
    def is_unique_key_violation(e):
        return e.pgcode == '23505' and '_key' in cstr(e.args[0])

    @staticmethod
    def is_duplicate_fieldname(e):
        return e.pgcode == '42701'  # duplicate_column

    def create_auth_table(self):
        """Create the password store table if it does not exist."""
        self.sql_ddl("""create table if not exists "__Auth" (
            "doctype" VARCHAR(140) NOT NULL,
            "name" VARCHAR(255) NOT NULL,
            "fieldname" VARCHAR(140) NOT NULL,
            "password" VARCHAR(255) NOT NULL,
            "encrypted" INT NOT NULL DEFAULT 0,
            PRIMARY KEY ("doctype", "name", "fieldname")
            )""")

    def create_global_search_table(self):
        """Create the global search table if it does not exist."""
        if not '__global_search' in self.get_tables():
            self.sql('''create table "__global_search"(
                doctype varchar(100),
                name varchar({0}),
                title varchar({0}),
                content text,
                route varchar({0}),
                published int not null default 0,
                unique (doctype, name))'''.format(self.VARCHAR_LEN))

    def create_user_settings_table(self):
        """Create the per-user settings table if it does not exist."""
        self.sql_ddl("""create table if not exists "__UserSettings" (
            "user" VARCHAR(180) NOT NULL,
            "doctype" VARCHAR(180) NOT NULL,
            "data" TEXT,
            UNIQUE ("user", "doctype")
            )""")

    def create_help_table(self):
        """Create the help table and its path index."""
        self.sql('''CREATE TABLE "help"(
                "path" varchar(255),
                "content" text,
                "title" text,
                "intro" text,
                "full_path" text)''')
        self.sql('''CREATE INDEX IF NOT EXISTS "help_index" ON "help" ("path")''')

    def updatedb(self, doctype, meta=None):
        """
        Syncs a `DocType` to the table
        * creates if required
        * updates columns
        * updates indices
        """
        # Parameterized to avoid SQL injection via the doctype name
        # (previously interpolated with str.format).
        res = self.sql("select issingle from `tabDocType` where name=%s", (doctype,))
        if not res:
            raise Exception('Wrong doctype {0} in updatedb'.format(doctype))

        if not res[0][0]:
            db_table = PostgresTable(doctype, meta)
            db_table.validate()

            # DDL must run outside the current transaction.
            self.commit()
            db_table.sync()
            self.begin()

    @staticmethod
    def get_on_duplicate_update(key='name'):
        """Return the postgres upsert clause for the given conflict key(s)."""
        if isinstance(key, list):
            key = '", "'.join(key)
        return 'ON CONFLICT ("{key}") DO UPDATE SET '.format(
            key=key
        )

    def check_transaction_status(self, query):
        """No-op on postgres (no transaction writes limit to enforce)."""
        pass

    def has_index(self, table_name, index_name):
        """Return a truthy result if the given index exists on the table."""
        # Parameterized to avoid SQL injection via identifier names.
        return self.sql("""SELECT 1 FROM pg_indexes WHERE tablename=%s
            and indexname=%s limit 1""", (table_name, index_name))

    def add_index(self, doctype, fields, index_name=None):
        """Creates an index with given fields if not already created.
        Index name will be `fieldname1_fieldname2_index`"""
        index_name = index_name or self.get_index_name(fields)
        table_name = 'tab' + doctype
        # DDL must run outside the current transaction.
        self.commit()
        self.sql("""CREATE INDEX IF NOT EXISTS "{}" ON `{}`("{}")""".format(index_name, table_name, '", "'.join(fields)))

    def add_unique(self, doctype, fields, constraint_name=None):
        """Add a unique constraint over `fields` if it does not already exist."""
        if isinstance(fields, string_types):
            fields = [fields]
        if not constraint_name:
            constraint_name = "unique_" + "_".join(fields)

        if not self.sql("""
            SELECT CONSTRAINT_NAME
            FROM information_schema.TABLE_CONSTRAINTS
            WHERE table_name=%s
            AND constraint_type='UNIQUE'
            AND CONSTRAINT_NAME=%s""",
            ('tab' + doctype, constraint_name)):
                # DDL must run outside the current transaction.
                self.commit()
                self.sql("""ALTER TABLE `tab%s`
                    ADD CONSTRAINT %s UNIQUE (%s)""" % (doctype, constraint_name, ", ".join(fields)))

    def get_table_columns_description(self, table_name):
        """Returns list of column and its description"""
        # pylint: disable=W1401
        return self.sql('''
            SELECT a.column_name AS name,
            CASE a.data_type
                WHEN 'character varying' THEN CONCAT('varchar(', a.character_maximum_length ,')')
                WHEN 'timestamp without TIME zone' THEN 'timestamp'
                ELSE a.data_type
            END AS type,
            COUNT(b.indexdef) AS Index,
            COALESCE(a.column_default, NULL) AS default,
            BOOL_OR(b.unique) AS unique
            FROM information_schema.columns a
            LEFT JOIN
                (SELECT indexdef, tablename, indexdef LIKE '%UNIQUE INDEX%' AS unique
                FROM pg_indexes
                WHERE tablename='{table_name}') b
                ON SUBSTRING(b.indexdef, '\(.*\)') LIKE CONCAT('%', a.column_name, '%')
            WHERE a.table_name = '{table_name}'
            GROUP BY a.column_name, a.data_type, a.column_default, a.character_maximum_length;'''
            .format(table_name=table_name), as_dict=1)

    def get_database_list(self, target):
        """Return the names of all databases on the server.

        `target` is unused here but part of the Database interface.
        """
        return [d[0] for d in self.sql("SELECT datname FROM pg_database;")]
def modify_query(query):
    """Rewrite a MariaDB-flavoured query so that postgres accepts it."""
    # Identifier quoting: backticks become double quotes.
    query = query.replace('`', '"')

    # postgres has no LOCATE(); rewrite it as strpos() with swapped arguments.
    if re.search(r'locate\(', query, flags=re.IGNORECASE):
        query = re.sub(
            r'locate\(([^,]+),([^)]+)\)', r'strpos(\2, \1)',
            query, flags=re.IGNORECASE)

    # Bare "from tabX" references need the table name double-quoted.
    if re.search('from tab', query, flags=re.IGNORECASE):
        query = re.sub(
            'from tab([a-zA-Z]*)', r'from "tab\1"', query, flags=re.IGNORECASE)

    return query
def replace_locate_with_strpos(query):
    """Rewrite LOCATE(needle, haystack) as postgres' strpos(haystack, needle)."""
    # strpos is the locate equivalent in postgres, with swapped arguments.
    locate_call = re.compile(r'locate\(([^,]+),([^)]+)\)', flags=re.IGNORECASE)
    if locate_call.search(query):
        query = locate_call.sub(r'strpos(\2, \1)', query)
    return query
| |
import pytz
from django import forms
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.contrib.flatpages.models import FlatPage
from funfactory.urlresolvers import reverse
from airmozilla.base.forms import BaseModelForm
from airmozilla.main.models import (Approval, Category, Event, EventOldSlug,
Location, Participant, Tag, Template)
# (value, label) choice pairs for every pytz common timezone; underscores
# are replaced with spaces in the human-readable label.
TIMEZONE_CHOICES = [(tz, tz.replace('_', ' ')) for tz in pytz.common_timezones]
class UserEditForm(BaseModelForm):
    """Admin form for editing a user's active/staff/superuser flags and groups."""

    class Meta:
        model = User
        fields = ('is_active', 'is_staff', 'is_superuser', 'groups')

    def clean(self):
        """Cross-field validation of the permission flags."""
        cleaned_data = super(UserEditForm, self).clean()
        is_active = cleaned_data.get('is_active')
        is_staff = cleaned_data.get('is_staff')
        is_superuser = cleaned_data.get('is_superuser')
        groups = cleaned_data.get('groups')
        # Enforce flag hierarchy: superuser => staff => active, and
        # non-superuser staff must be in at least one group.
        if is_superuser and not is_staff:
            raise forms.ValidationError('Superusers must be staff.')
        if is_staff and not is_active:
            raise forms.ValidationError('Staff must be active.')
        if is_staff and not is_superuser and not groups:
            raise forms.ValidationError(
                'Non-superuser staff must belong to a group.'
            )
        return cleaned_data
class GroupEditForm(BaseModelForm):
    """Admin form for editing an auth Group, with checkbox permissions."""

    def __init__(self, *args, **kwargs):
        super(GroupEditForm, self).__init__(*args, **kwargs)
        self.fields['name'].required = True
        # Replace the default multi-select widget with checkboxes while
        # keeping the same permission choices.
        choices = self.fields['permissions'].choices
        self.fields['permissions'] = forms.MultipleChoiceField(
            choices=choices,
            widget=forms.CheckboxSelectMultiple,
            required=False
        )

    class Meta:
        model = Group
class UserFindForm(BaseModelForm):
    """Lookup form: find an existing user by (case-insensitive) email."""

    class Meta:
        model = User
        fields = ('email',)

    def clean_email(self):
        """Validate the email maps to an existing user; return its stored email."""
        email = self.cleaned_data['email']
        try:
            user = User.objects.get(email__iexact=email)
        except User.DoesNotExist:
            raise forms.ValidationError('User with this email not found.')
        # Return the canonical casing from the database.
        return user.email
class EventRequestForm(BaseModelForm):
    """Form used to request a new event.

    Tags and participants are edited as comma-separated text fields and
    resolved to model instances in the clean_* methods.
    """
    tags = forms.CharField(required=False)
    participants = forms.CharField(required=False)
    timezone = forms.ChoiceField(
        choices=TIMEZONE_CHOICES,
        initial=settings.TIME_ZONE, label='Time zone'
    )

    def __init__(self, *args, **kwargs):
        super(EventRequestForm, self).__init__(*args, **kwargs)
        # "New ..." shortcut buttons rendered next to the related fields.
        self.fields['participants'].help_text = (
            '<a href="%s" class="btn" target="_blank">'
            '<i class="icon-plus-sign"></i>'
            'New Participant'
            '</a>' % reverse('manage:participant_new'))
        self.fields['location'].help_text = (
            '<a href="%s" class="btn" target="_blank">'
            '<i class="icon-plus-sign"></i>'
            'New location'
            '</a>' % reverse('manage:location_new'))
        self.fields['category'].help_text = (
            '<a href="%s" class="btn" target="_blank">'
            '<i class="icon-plus-sign"></i>'
            'New category'
            '</a>' % reverse('manage:category_new'))
        self.fields['placeholder_img'].label = 'Placeholder image'
        if 'instance' in kwargs:
            event = kwargs['instance']
            approvals = event.approval_set.all()
            self.initial['approvals'] = [app.group for app in approvals]
            if event.pk:
                # Pre-populate the comma-separated text inputs from the
                # saved many-to-many relations.
                tag_format = lambda objects: ','.join(map(unicode, objects))
                participants_formatted = tag_format(event.participants.all())
                tags_formatted = tag_format(event.tags.all())
                self.initial['tags'] = tags_formatted
                self.initial['participants'] = participants_formatted

    def clean_tags(self):
        """Split the tags string and get-or-create a Tag for each name."""
        tags = self.cleaned_data['tags']
        split_tags = [t.strip() for t in tags.split(',') if t.strip()]
        final_tags = []
        for tag_name in split_tags:
            t, __ = Tag.objects.get_or_create(name=tag_name)
            final_tags.append(t)
        return final_tags

    def clean_participants(self):
        """Resolve comma-separated names to Participant objects.

        NOTE(review): Participant.objects.get raises DoesNotExist for an
        unknown name, surfacing as a server error rather than a form error -
        presumably names are always chosen from existing participants; verify.
        """
        participants = self.cleaned_data['participants']
        split_participants = [p.strip() for p in participants.split(',')
                              if p.strip()]
        final_participants = []
        for participant_name in split_participants:
            p = Participant.objects.get(name=participant_name)
            final_participants.append(p)
        return final_participants

    def clean_slug(self):
        """Enforce unique slug across current slugs and old slugs."""
        slug = self.cleaned_data['slug']
        if (Event.objects.filter(slug=slug).exclude(pk=self.instance.id)
                or EventOldSlug.objects.filter(slug=slug)):
            raise forms.ValidationError('This slug is already in use.')
        return slug

    class Meta:
        model = Event
        # Fix: 'additional_links' was listed twice with the same widget;
        # the duplicate dict key has been removed.
        widgets = {
            'description': forms.Textarea(attrs={'rows': 4}),
            'short_description': forms.Textarea(attrs={'rows': 2}),
            'call_info': forms.Textarea(attrs={'rows': 3}),
            'additional_links': forms.Textarea(attrs={'rows': 3}),
            'template_environment': forms.Textarea(attrs={'rows': 3}),
            'start_time': forms.DateTimeInput(format='%Y-%m-%d %H:%M'),
            'archive_time': forms.DateTimeInput(format='%Y-%m-%d %H:%M'),
        }
        exclude = ('featured', 'status', 'archive_time', 'slug')
        # Fields specified to enforce order
        fields = (
            'title', 'placeholder_img', 'description',
            'short_description', 'location', 'start_time', 'timezone',
            'participants', 'category', 'tags', 'call_info',
            'additional_links', 'public'
        )
class EventEditForm(EventRequestForm):
    """Full event edit form; adds per-group approval checkboxes."""

    # Only groups holding the change_approval permission may be selected.
    approvals = forms.ModelMultipleChoiceField(
        queryset=Group.objects.filter(permissions__codename='change_approval'),
        required=False,
        widget=forms.CheckboxSelectMultiple()
    )

    class Meta(EventRequestForm.Meta):
        # NOTE(review): 'archive_time' appears both here in exclude and in
        # fields below - in Django, exclude wins; presumably intentional to
        # show the field order while keeping it off the form? Verify.
        exclude = ('archive_time',)
        # Fields specified to enforce order
        fields = (
            'title', 'slug', 'status', 'public', 'featured', 'template',
            'template_environment', 'placeholder_img', 'location',
            'description', 'short_description', 'start_time', 'archive_time',
            'timezone', 'participants', 'category', 'tags', 'call_info',
            'additional_links', 'approvals'
        )
class EventExperiencedRequestForm(EventEditForm):
    """Event request form for experienced requesters (more fields exposed)."""

    class Meta(EventEditForm.Meta):
        exclude = ('featured', 'archive_time', 'slug')
        # Fields specified to enforce order.
        # Fix: 'public' was listed twice in this tuple; the duplicate
        # (previously also appearing after 'additional_links') was removed.
        fields = (
            'title', 'status', 'public', 'template',
            'template_environment', 'placeholder_img', 'description',
            'short_description', 'location', 'start_time', 'timezone',
            'participants', 'category', 'tags', 'call_info',
            'additional_links', 'approvals'
        )
class EventArchiveForm(BaseModelForm):
    """Form for archiving an event: template selection plus archive time."""

    # Declared explicitly, so it is present on the form even though it is
    # not listed in Meta.fields below.
    archive_time = forms.IntegerField()

    def __init__(self, *args, **kwargs):
        super(EventArchiveForm, self).__init__(*args, **kwargs)
        # The help text is a mount point for a client-side slider widget.
        self.fields['archive_time'].help_text = (
            '<div id="archive_time_slider"></div>'
        )

    class Meta(EventRequestForm.Meta):
        exclude = ()
        fields = ('template', 'template_environment')
class EventFindForm(BaseModelForm):
    """Lookup form: find events by a case-insensitive title substring."""

    class Meta:
        model = Event
        fields = ('title',)

    def clean_title(self):
        """Reject the search if no event title contains the given text."""
        title = self.cleaned_data['title']
        if not Event.objects.filter(title__icontains=title):
            raise forms.ValidationError('No event with this title found.')
        return title
class ParticipantEditForm(BaseModelForm):
    """Edit form for a Participant; hides internal bookkeeping fields."""

    class Meta:
        model = Participant
        exclude = ('creator', 'clear_token')
class ParticipantFindForm(BaseModelForm):
    """Lookup form: find participants by a case-insensitive name substring."""

    class Meta:
        model = Participant
        fields = ('name',)

    def clean_name(self):
        """Reject the search if no participant name contains the given text."""
        name = self.cleaned_data['name']
        if not Participant.objects.filter(name__icontains=name):
            raise forms.ValidationError('No participant with this name found.')
        return name
class CategoryForm(BaseModelForm):
    """Plain model form exposing all Category fields."""

    class Meta:
        model = Category
class TemplateEditForm(BaseModelForm):
    """Edit form for a Template, with a tall textarea for the content."""

    class Meta:
        model = Template
        widgets = {
            'content': forms.Textarea(attrs={'rows': 20})
        }
class LocationEditForm(BaseModelForm):
    """Edit form for a Location, with a timezone dropdown."""

    timezone = forms.ChoiceField(choices=TIMEZONE_CHOICES)

    def __init__(self, *args, **kwargs):
        super(LocationEditForm, self).__init__(*args, **kwargs)
        # Default the dropdown to the instance's saved timezone, falling
        # back to the site-wide setting for new locations.
        if 'instance' in kwargs:
            initial = kwargs['instance'].timezone
        else:
            initial = settings.TIME_ZONE
        self.initial['timezone'] = initial

    class Meta:
        model = Location
class ApprovalForm(BaseModelForm):
    """Form for entering an approval comment."""

    class Meta:
        model = Approval
        fields = ('comment',)
        widgets = {
            'comment': forms.Textarea(attrs={'rows': 3})
        }
class FlatPageEditForm(BaseModelForm):
    """Edit form for Django flatpages (url, title and content only)."""

    class Meta:
        model = FlatPage
        fields = ('url', 'title', 'content')
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Functions that export a baked SNeRG model for viewing in the web-viewer."""
import json
import math
import multiprocessing
import jax
import numpy as np
from PIL import Image
import tensorflow as tf
from snerg.nerf import utils
def save_8bit_png(img_and_path):
  """Save an 8bit numpy array as a PNG on disk.

  Args:
    img_and_path: A tuple of an image (numpy array, 8bit,
      [height, width, channels]) and a path where the image is saved (string).
  """
  image, output_path = img_and_path
  with utils.open_file(output_path, 'wb') as png_file:
    Image.fromarray(image).save(png_file, 'PNG')
def synchronize_jax_hosts():
  """Makes sure that the JAX hosts have all reached this point."""
  num_local_devices = jax.local_device_count()
  num_hosts = jax.host_count()
  host_id = jax.host_id()

  # Each host contributes an array filled with its own host id.
  marker = host_id * np.ones((num_local_devices, 1), dtype=np.int32)

  # Broadcasting it with all_gather forces cross-host communication, so all
  # hosts must have reached this point in the code before it completes.
  gather_fn = jax.pmap(
      lambda x: jax.lax.all_gather(x, axis_name='i'), axis_name='i')
  gathered = np.reshape(
      gather_fn(marker)[0], (num_hosts, num_local_devices, 1))

  # Finally, make sure that the data is exactly what we expect.
  for host_index in range(num_hosts):
    assert gathered[host_index][0] == host_index
def parallel_write_images(image_write_fn, img_and_path_list):
  """Parallelizes image writing over JAX hosts and CPU cores.

  Args:
    image_write_fn: A function that takes a tuple as input (path, image) and
      writes the result to disk.
    img_and_path_list: A list of tuples (image, path) containing all the images
      that should be written.
  """
  num_hosts = jax.host_count()
  host_id = jax.host_id()
  num_images = len(img_and_path_list)
  shard_size = math.ceil(num_images / num_hosts)

  # Round-robin shard assignment: host h handles items h, h + num_hosts, ...
  local_shard = []
  for slot in range(shard_size):
    global_index = slot * num_hosts + host_id
    if global_index < num_images:
      local_shard.append(img_and_path_list[global_index])

  # Within this host, fan the shard out over a thread pool.
  with multiprocessing.pool.ThreadPool() as pool:
    pool.map(image_write_fn, local_shard)
    pool.close()
    pool.join()
def export_snerg_scene(output_directory, atlas, atlas_block_indices,
                       viewdir_mlp_params, render_params, atlas_params,
                       scene_params, input_height, input_width, input_focal):
  """Exports a scene to web-viewer format: a collection of PNGs and a JSON file.

  The scene gets exported to output_directory/png. Any previous results will
  be overwritten. Writing is multi-host aware: shared files and directories
  are touched only by host 0, while the per-slice PNGs are sharded across
  hosts via parallel_write_images.

  Args:
    output_directory: The root directory where the scene gets written.
    atlas: The SNeRG scene packed as a texture atlas in a [S, S, N, C] numpy
      array, where the channels C contain both RGB and features.
    atlas_block_indices: The indirection grid of the SNeRG scene, represented
      as a numpy int32 array of size (bW, bH, bD, 3).
    viewdir_mlp_params: A dict containing the MLP parameters for the
      per-sample view-dependence MLP.
    render_params: A dict with parameters for high-res rendering.
    atlas_params: A dict with params for building the 3D texture atlas.
    scene_params: A dict for scene specific params (bbox, rotation,
      resolution).
    input_height: Height (pixels) of the NDC camera (i.e. the training
      cameras).
    input_width: Width (pixels) of the NDC camera (i.e. the training cameras).
    input_focal: Focal length (pixels) of the NDC camera (i.e. the
      training cameras).
  """
  # Slice the atlas into images. The depth axis is consumed in groups of 4
  # planes; each group is stacked vertically into one tall 2D image, so each
  # output PNG holds 4 consecutive atlas slices.
  rgbs = []
  alphas = []
  for i in range(0, atlas.shape[2], 4):
    rgb_stack = []
    alpha_stack = []
    for j in range(4):
      plane_index = i + j
      # Channels 0:3 hold RGB; the transpose swaps atlas (x, y) into image
      # (row, col) order.
      rgb_stack.append(atlas[:, :, plane_index, :][Ellipsis,
                                                   0:3].transpose([1, 0, 2]))
      # NOTE(review): the channel at index scene_params['_channels'] is read
      # as alpha, implying the atlas layout is [RGB, features..., alpha] with
      # '_channels' counting the non-alpha channels -- confirm against the
      # atlas construction code.
      alpha_stack.append(
          atlas[:, :,
                plane_index, :][Ellipsis,
                                scene_params['_channels']].transpose([1, 0]))
    rgbs.append(np.concatenate(rgb_stack, axis=0))
    alphas.append(np.concatenate(alpha_stack, axis=0))
  # The indirection grid is exported as a single RGB image whose pixel values
  # are the (already small) block indices, truncated to uint8.
  atlas_index_image = np.transpose(atlas_block_indices, [2, 1, 0, 3]).reshape(
      (-1, atlas_block_indices.shape[0], 3)).astype(np.uint8)
  # Build a dictionary of the scene parameters, so we can export it as a json.
  export_scene_params = {}
  export_scene_params['voxel_size'] = float(render_params['_voxel_size'])
  export_scene_params['block_size'] = atlas_params['_data_block_size']
  export_scene_params['grid_width'] = int(render_params['_grid_size'][0])
  export_scene_params['grid_height'] = int(render_params['_grid_size'][1])
  export_scene_params['grid_depth'] = int(render_params['_grid_size'][2])
  export_scene_params['atlas_width'] = atlas.shape[0]
  export_scene_params['atlas_height'] = atlas.shape[1]
  export_scene_params['atlas_depth'] = atlas.shape[2]
  export_scene_params['num_slices'] = len(rgbs)
  export_scene_params['min_x'] = float(scene_params['min_xyz'][0])
  export_scene_params['min_y'] = float(scene_params['min_xyz'][1])
  export_scene_params['min_z'] = float(scene_params['min_xyz'][2])
  export_scene_params['atlas_blocks_x'] = int(atlas.shape[0] /
                                              atlas_params['atlas_block_size'])
  export_scene_params['atlas_blocks_y'] = int(atlas.shape[1] /
                                              atlas_params['atlas_block_size'])
  export_scene_params['atlas_blocks_z'] = int(atlas.shape[2] /
                                              atlas_params['atlas_block_size'])
  export_scene_params['input_height'] = float(input_height)
  export_scene_params['input_width'] = float(input_width)
  export_scene_params['input_focal'] = float(input_focal)
  export_scene_params['worldspace_T_opengl'] = scene_params[
      'worldspace_T_opengl'].tolist()
  export_scene_params['ndc'] = scene_params['ndc']
  # Also include the network weights in this dictionary.
  # NOTE(review): the exported layers are Dense_0, Dense_1 and Dense_3
  # (Dense_2 is not exported) -- verify this matches the view-dependence
  # MLP's layer naming before changing anything here.
  export_scene_params['0_weights'] = viewdir_mlp_params['params']['Dense_0'][
      'kernel'].tolist()
  export_scene_params['1_weights'] = viewdir_mlp_params['params']['Dense_1'][
      'kernel'].tolist()
  export_scene_params['2_weights'] = viewdir_mlp_params['params']['Dense_3'][
      'kernel'].tolist()
  export_scene_params['0_bias'] = viewdir_mlp_params['params']['Dense_0'][
      'bias'].tolist()
  export_scene_params['1_bias'] = viewdir_mlp_params['params']['Dense_1'][
      'bias'].tolist()
  export_scene_params['2_bias'] = viewdir_mlp_params['params']['Dense_3'][
      'bias'].tolist()
  # To avoid partial overwrites, first dump the scene to a temporary directory.
  output_tmp_directory = output_directory + '/temp'
  # Only host 0 touches the shared temp directory; other hosts wait at the
  # barrier below.
  if jax.host_id() == 0:
    # Delete the folder if it already exists.
    if utils.isdir(output_tmp_directory):
      tf.io.gfile.rmtree(output_tmp_directory)
    utils.makedirs(output_tmp_directory)
  # Now store the indirection grid.
  atlas_indices_path = '%s/atlas_indices.png' % output_tmp_directory
  if jax.host_id() == 0:
    save_8bit_png((atlas_index_image, atlas_indices_path))
  # Make sure that all JAX hosts have reached this point in the code before we
  # proceed. Things will get tricky if output_tmp_directory doesn't yet exist.
  synchronize_jax_hosts()
  # Save the alpha values and RGB colors as one set of PNG images.
  output_images = []
  output_paths = []
  for i, rgb_and_alpha in enumerate(zip(rgbs, alphas)):
    rgb, alpha = rgb_and_alpha
    rgba = np.concatenate([rgb, np.expand_dims(alpha, -1)], axis=-1)
    # Quantize [0, 1] floats to uint8 (255 == uint_multiplier), clamping to
    # the valid range first.
    uint_multiplier = 2.0**8 - 1.0
    rgba = np.minimum(uint_multiplier,
                      np.maximum(0.0, np.floor(uint_multiplier * rgba))).astype(
                          np.uint8)
    output_images.append(rgba)
    atlas_rgba_path = '%s/rgba_%03d.png' % (output_tmp_directory, i)
    output_paths.append(atlas_rgba_path)
  # Save the computed features a separate collection of PNGs.
  uint_multiplier = 2.0**8 - 1.0
  for i in range(0, atlas.shape[2], 4):
    feature_stack = []
    for j in range(4):
      plane_index = i + j
      # Channels 3:-1 hold the learned features, i.e. everything between the
      # RGB channels and the trailing alpha channel.
      feature_slice = atlas[:, :, plane_index, :][Ellipsis,
                                                  3:-1].transpose([1, 0, 2])
      feature_slice = np.minimum(
          uint_multiplier,
          np.maximum(0.0, np.floor(uint_multiplier * feature_slice))).astype(
              np.uint8)
      feature_stack.append(feature_slice)
    output_images.append(np.concatenate(feature_stack, axis=0))
  for i in range(len(rgbs)):
    output_paths.append('%s/feature_%03d.png' % (output_tmp_directory, i))
  # Sharded across hosts and threads; every host writes only its own subset.
  parallel_write_images(save_8bit_png, list(zip(output_images, output_paths)))
  # Now export the scene parameters and the network weights as a JSON.
  export_scene_params['format'] = 'png'
  scene_params_path = '%s/scene_params.json' % output_tmp_directory
  if jax.host_id() == 0:
    with utils.open_file(scene_params_path, 'wb') as f:
      f.write(json.dumps(export_scene_params).encode('utf-8'))
  # Again, make sure that the JAX hosts are in sync. Don't delete
  # output_tmp_directory before all files have been written.
  synchronize_jax_hosts()
  # Finally move the scene to the appropriate output path.
  output_png_directory = output_directory + '/png'
  if jax.host_id() == 0:
    # Delete the folder if it already exists.
    if utils.isdir(output_png_directory):
      tf.io.gfile.rmtree(output_png_directory)
    tf.io.gfile.rename(output_tmp_directory, output_png_directory)
def compute_scene_size(output_directory, atlas_block_indices, atlas_params,
                       scene_params):
  """Computes the size of an exported SNeRG scene.

  Args:
    output_directory: The root directory where the SNeRG scene was written.
    atlas_block_indices: The indirection grid of the SNeRG scene.
    atlas_params: A dict with params for building the 3D texture atlas.
    scene_params: A dict for scene specific params (bbox, rotation, resolution).

  Returns:
    png_size_gb: The scene size (in GB) when stored as compressed 8-bit PNGs.
    byte_size_gb: The scene size (in GB), stored as uncompressed 8-bit integers.
    float_size_gb: The scene size (in GB), stored as uncompressed 32-bit floats.
  """
  bytes_per_gb = 1000 * 1000 * 1000
  output_png_directory = output_directory + '/png'
  # Collect every PNG the exporter wrote for this scene.
  png_files = []
  for filename in sorted(utils.listdir(output_png_directory)):
    if filename.endswith('png'):
      png_files.append(output_png_directory + '/' + filename)
  png_size_gb = sum(
      tf.io.gfile.stat(f).length / bytes_per_gb for f in png_files)
  # The indirection grid is stored alongside the atlas in both raw formats.
  block_index_size_gb = np.array(
      atlas_block_indices.shape).prod() / bytes_per_gb
  # A block is active iff its first index coordinate is non-negative.
  active_atlas_blocks = (atlas_block_indices[Ellipsis, 0] >= 0).sum()
  active_atlas_voxels = (
      active_atlas_blocks * atlas_params['atlas_block_size']**3)
  active_atlas_channels = active_atlas_voxels * scene_params['_channels']
  byte_size_gb = active_atlas_channels / bytes_per_gb + block_index_size_gb
  float_size_gb = (
      active_atlas_channels * 4 / bytes_per_gb + block_index_size_gb)
  return png_size_gb, byte_size_gb, float_size_gb
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .instance_view_status import InstanceViewStatus
from .sub_resource import SubResource
from .sku import Sku
from .availability_set import AvailabilitySet
from .virtual_machine_size import VirtualMachineSize
from .virtual_machine_extension_image import VirtualMachineExtensionImage
from .virtual_machine_image_resource import VirtualMachineImageResource
from .virtual_machine_extension_instance_view import VirtualMachineExtensionInstanceView
from .virtual_machine_extension import VirtualMachineExtension
from .purchase_plan import PurchasePlan
from .os_disk_image import OSDiskImage
from .data_disk_image import DataDiskImage
from .virtual_machine_image import VirtualMachineImage
from .usage_name import UsageName
from .usage import Usage
from .virtual_machine_capture_parameters import VirtualMachineCaptureParameters
from .virtual_machine_capture_result import VirtualMachineCaptureResult
from .plan import Plan
from .hardware_profile import HardwareProfile
from .image_reference import ImageReference
from .key_vault_secret_reference import KeyVaultSecretReference
from .key_vault_key_reference import KeyVaultKeyReference
from .disk_encryption_settings import DiskEncryptionSettings
from .virtual_hard_disk import VirtualHardDisk
from .managed_disk_parameters import ManagedDiskParameters
from .os_disk import OSDisk
from .data_disk import DataDisk
from .storage_profile import StorageProfile
from .additional_unattend_content import AdditionalUnattendContent
from .win_rm_listener import WinRMListener
from .win_rm_configuration import WinRMConfiguration
from .windows_configuration import WindowsConfiguration
from .ssh_public_key import SshPublicKey
from .ssh_configuration import SshConfiguration
from .linux_configuration import LinuxConfiguration
from .vault_certificate import VaultCertificate
from .vault_secret_group import VaultSecretGroup
from .os_profile import OSProfile
from .network_interface_reference import NetworkInterfaceReference
from .network_profile import NetworkProfile
from .boot_diagnostics import BootDiagnostics
from .diagnostics_profile import DiagnosticsProfile
from .virtual_machine_extension_handler_instance_view import VirtualMachineExtensionHandlerInstanceView
from .virtual_machine_agent_instance_view import VirtualMachineAgentInstanceView
from .disk_instance_view import DiskInstanceView
from .boot_diagnostics_instance_view import BootDiagnosticsInstanceView
from .virtual_machine_identity import VirtualMachineIdentity
from .virtual_machine_instance_view import VirtualMachineInstanceView
from .virtual_machine import VirtualMachine
from .upgrade_policy import UpgradePolicy
from .image_os_disk import ImageOSDisk
from .image_data_disk import ImageDataDisk
from .image_storage_profile import ImageStorageProfile
from .image import Image
from .virtual_machine_scale_set_identity import VirtualMachineScaleSetIdentity
from .virtual_machine_scale_set_os_profile import VirtualMachineScaleSetOSProfile
from .virtual_machine_scale_set_managed_disk_parameters import VirtualMachineScaleSetManagedDiskParameters
from .virtual_machine_scale_set_os_disk import VirtualMachineScaleSetOSDisk
from .virtual_machine_scale_set_data_disk import VirtualMachineScaleSetDataDisk
from .virtual_machine_scale_set_storage_profile import VirtualMachineScaleSetStorageProfile
from .api_entity_reference import ApiEntityReference
from .virtual_machine_scale_set_ip_configuration import VirtualMachineScaleSetIPConfiguration
from .virtual_machine_scale_set_network_configuration import VirtualMachineScaleSetNetworkConfiguration
from .virtual_machine_scale_set_network_profile import VirtualMachineScaleSetNetworkProfile
from .virtual_machine_scale_set_extension import VirtualMachineScaleSetExtension
from .virtual_machine_scale_set_extension_profile import VirtualMachineScaleSetExtensionProfile
from .virtual_machine_scale_set_vm_profile import VirtualMachineScaleSetVMProfile
from .virtual_machine_scale_set import VirtualMachineScaleSet
from .virtual_machine_scale_set_vm_instance_ids import VirtualMachineScaleSetVMInstanceIDs
from .virtual_machine_scale_set_vm_instance_required_ids import VirtualMachineScaleSetVMInstanceRequiredIDs
from .virtual_machine_status_code_count import VirtualMachineStatusCodeCount
from .virtual_machine_scale_set_instance_view_statuses_summary import VirtualMachineScaleSetInstanceViewStatusesSummary
from .virtual_machine_scale_set_vm_extensions_summary import VirtualMachineScaleSetVMExtensionsSummary
from .virtual_machine_scale_set_instance_view import VirtualMachineScaleSetInstanceView
from .virtual_machine_scale_set_sku_capacity import VirtualMachineScaleSetSkuCapacity
from .virtual_machine_scale_set_sku import VirtualMachineScaleSetSku
from .virtual_machine_scale_set_vm import VirtualMachineScaleSetVM
from .virtual_machine_scale_set_vm_instance_view import VirtualMachineScaleSetVMInstanceView
from .api_error_base import ApiErrorBase
from .inner_error import InnerError
from .api_error import ApiError
from .compute_long_running_operation_properties import ComputeLongRunningOperationProperties
from .resource import Resource
from .sub_resource_read_only import SubResourceReadOnly
from .operation_status_response import OperationStatusResponse
from .resource_update import ResourceUpdate
from .image_disk_reference import ImageDiskReference
from .creation_data import CreationData
from .source_vault import SourceVault
from .key_vault_and_secret_reference import KeyVaultAndSecretReference
from .key_vault_and_key_reference import KeyVaultAndKeyReference
from .encryption_settings import EncryptionSettings
from .disk import Disk
from .disk_update import DiskUpdate
from .grant_access_data import GrantAccessData
from .access_uri import AccessUri
from .snapshot import Snapshot
from .snapshot_update import SnapshotUpdate
from .availability_set_paged import AvailabilitySetPaged
from .virtual_machine_size_paged import VirtualMachineSizePaged
from .usage_paged import UsagePaged
from .image_paged import ImagePaged
from .virtual_machine_paged import VirtualMachinePaged
from .virtual_machine_scale_set_paged import VirtualMachineScaleSetPaged
from .virtual_machine_scale_set_sku_paged import VirtualMachineScaleSetSkuPaged
from .virtual_machine_scale_set_vm_paged import VirtualMachineScaleSetVMPaged
from .disk_paged import DiskPaged
from .snapshot_paged import SnapshotPaged
from .compute_management_client_enums import (
StatusLevelTypes,
OperatingSystemTypes,
VirtualMachineSizeTypes,
CachingTypes,
DiskCreateOptionTypes,
StorageAccountTypes,
PassNames,
ComponentNames,
SettingNames,
ProtocolTypes,
ResourceIdentityType,
UpgradeMode,
OperatingSystemStateTypes,
VirtualMachineScaleSetSkuScaleType,
DiskCreateOption,
AccessLevel,
InstanceViewTypes,
)
# Public API of this generated models package: every model class imported
# above plus the enum names re-exported from compute_management_client_enums.
# This file is auto-generated by AutoRest (see header); do not edit by hand.
__all__ = [
    'InstanceViewStatus',
    'SubResource',
    'Sku',
    'AvailabilitySet',
    'VirtualMachineSize',
    'VirtualMachineExtensionImage',
    'VirtualMachineImageResource',
    'VirtualMachineExtensionInstanceView',
    'VirtualMachineExtension',
    'PurchasePlan',
    'OSDiskImage',
    'DataDiskImage',
    'VirtualMachineImage',
    'UsageName',
    'Usage',
    'VirtualMachineCaptureParameters',
    'VirtualMachineCaptureResult',
    'Plan',
    'HardwareProfile',
    'ImageReference',
    'KeyVaultSecretReference',
    'KeyVaultKeyReference',
    'DiskEncryptionSettings',
    'VirtualHardDisk',
    'ManagedDiskParameters',
    'OSDisk',
    'DataDisk',
    'StorageProfile',
    'AdditionalUnattendContent',
    'WinRMListener',
    'WinRMConfiguration',
    'WindowsConfiguration',
    'SshPublicKey',
    'SshConfiguration',
    'LinuxConfiguration',
    'VaultCertificate',
    'VaultSecretGroup',
    'OSProfile',
    'NetworkInterfaceReference',
    'NetworkProfile',
    'BootDiagnostics',
    'DiagnosticsProfile',
    'VirtualMachineExtensionHandlerInstanceView',
    'VirtualMachineAgentInstanceView',
    'DiskInstanceView',
    'BootDiagnosticsInstanceView',
    'VirtualMachineIdentity',
    'VirtualMachineInstanceView',
    'VirtualMachine',
    'UpgradePolicy',
    'ImageOSDisk',
    'ImageDataDisk',
    'ImageStorageProfile',
    'Image',
    'VirtualMachineScaleSetIdentity',
    'VirtualMachineScaleSetOSProfile',
    'VirtualMachineScaleSetManagedDiskParameters',
    'VirtualMachineScaleSetOSDisk',
    'VirtualMachineScaleSetDataDisk',
    'VirtualMachineScaleSetStorageProfile',
    'ApiEntityReference',
    'VirtualMachineScaleSetIPConfiguration',
    'VirtualMachineScaleSetNetworkConfiguration',
    'VirtualMachineScaleSetNetworkProfile',
    'VirtualMachineScaleSetExtension',
    'VirtualMachineScaleSetExtensionProfile',
    'VirtualMachineScaleSetVMProfile',
    'VirtualMachineScaleSet',
    'VirtualMachineScaleSetVMInstanceIDs',
    'VirtualMachineScaleSetVMInstanceRequiredIDs',
    'VirtualMachineStatusCodeCount',
    'VirtualMachineScaleSetInstanceViewStatusesSummary',
    'VirtualMachineScaleSetVMExtensionsSummary',
    'VirtualMachineScaleSetInstanceView',
    'VirtualMachineScaleSetSkuCapacity',
    'VirtualMachineScaleSetSku',
    'VirtualMachineScaleSetVM',
    'VirtualMachineScaleSetVMInstanceView',
    'ApiErrorBase',
    'InnerError',
    'ApiError',
    'ComputeLongRunningOperationProperties',
    'Resource',
    'SubResourceReadOnly',
    'OperationStatusResponse',
    'ResourceUpdate',
    'ImageDiskReference',
    'CreationData',
    'SourceVault',
    'KeyVaultAndSecretReference',
    'KeyVaultAndKeyReference',
    'EncryptionSettings',
    'Disk',
    'DiskUpdate',
    'GrantAccessData',
    'AccessUri',
    'Snapshot',
    'SnapshotUpdate',
    'AvailabilitySetPaged',
    'VirtualMachineSizePaged',
    'UsagePaged',
    'ImagePaged',
    'VirtualMachinePaged',
    'VirtualMachineScaleSetPaged',
    'VirtualMachineScaleSetSkuPaged',
    'VirtualMachineScaleSetVMPaged',
    'DiskPaged',
    'SnapshotPaged',
    'StatusLevelTypes',
    'OperatingSystemTypes',
    'VirtualMachineSizeTypes',
    'CachingTypes',
    'DiskCreateOptionTypes',
    'StorageAccountTypes',
    'PassNames',
    'ComponentNames',
    'SettingNames',
    'ProtocolTypes',
    'ResourceIdentityType',
    'UpgradeMode',
    'OperatingSystemStateTypes',
    'VirtualMachineScaleSetSkuScaleType',
    'DiskCreateOption',
    'AccessLevel',
    'InstanceViewTypes',
]
| |
# Copyright 2015 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Isilon specific NAS backend plugin.
"""
import os
from oslo_config import cfg
from oslo_log import log
from oslo_utils import units
import six
from manila.common import constants as const
from manila import exception
from manila.i18n import _, _LW
from manila.share.drivers.emc.plugins import base
from manila.share.drivers.emc.plugins.isilon import isilon_api
# Global oslo.config entry point shared with the rest of Manila.
CONF = cfg.CONF
# Version of this Isilon plugin; reported via update_share_stats().
VERSION = "0.1.0"
# Module-level logger.
LOG = log.getLogger(__name__)
class IsilonStorageConnection(base.StorageConnection):
    """Implements Isilon specific functionality for EMC Manila driver.

    Talks to an Isilon cluster through its REST API (via
    isilon_api.IsilonApi) to manage NFS exports, SMB/CIFS shares,
    snapshots and directory quotas.
    """

    def __init__(self, *args, **kwargs):
        super(IsilonStorageConnection, self).__init__(*args, **kwargs)
        # Connection settings; populated by connect().
        self._server = None
        self._port = None
        self._username = None
        self._password = None
        self._server_url = None
        self._connect_resp = None
        self._root_dir = None
        self._verify_ssl_cert = None
        self._containers = {}
        self._shares = {}
        self._snapshots = {}
        self._isilon_api = None
        # Kept as an attribute so tests can substitute a fake API class.
        self._isilon_api_class = isilon_api.IsilonApi
        self.driver_handles_share_servers = False

    def _get_container_path(self, share):
        """Return path to a container."""
        return os.path.join(self._root_dir, share['name'])

    def create_share(self, context, share, share_server):
        """Is called to create share."""
        if share['share_proto'] == 'NFS':
            location = self._create_nfs_share(share)
        elif share['share_proto'] == 'CIFS':
            location = self._create_cifs_share(share)
        else:
            message = (_('Unsupported share protocol: %(proto)s.') %
                       {'proto': share['share_proto']})
            LOG.error(message)
            raise exception.InvalidShare(reason=message)

        # apply directory quota based on share size
        max_share_size = share['size'] * units.Gi
        self._isilon_api.quota_create(
            self._get_container_path(share), 'directory', max_share_size)

        return location

    def create_share_from_snapshot(self, context, share, snapshot,
                                   share_server):
        """Creates a share from the snapshot."""
        # Create share at new location
        location = self.create_share(context, share, share_server)

        # Clone snapshot to new location
        fq_target_dir = self._get_container_path(share)
        self._isilon_api.clone_snapshot(snapshot['name'], fq_target_dir)

        return location

    def _create_nfs_share(self, share):
        """Is called to create nfs share."""
        container_path = self._get_container_path(share)
        self._isilon_api.create_directory(container_path)

        share_created = self._isilon_api.create_nfs_export(container_path)
        if not share_created:
            message = (
                _('The requested NFS share "%(share)s" was not created.') %
                {'share': share['name']})
            LOG.error(message)
            raise exception.ShareBackendException(msg=message)
        location = '{0}:{1}'.format(self._server, container_path)
        return location

    def _create_cifs_share(self, share):
        """Is called to create cifs share."""
        # Create the directory
        container_path = self._get_container_path(share)
        self._isilon_api.create_directory(container_path)
        self._isilon_api.create_smb_share(share['name'], container_path)
        share_path = '\\\\{0}\\{1}'.format(self._server, share['name'])
        return share_path

    def create_snapshot(self, context, snapshot, share_server):
        """Is called to create snapshot."""
        snapshot_path = os.path.join(self._root_dir, snapshot['share_name'])
        self._isilon_api.create_snapshot(snapshot['name'], snapshot_path)

    def delete_share(self, context, share, share_server):
        """Is called to remove share."""
        if share['share_proto'] == 'NFS':
            self._delete_nfs_share(share)
        elif share['share_proto'] == 'CIFS':
            self._delete_cifs_share(share)
        else:
            message = (_('Unsupported share type: %(type)s.') %
                       {'type': share['share_proto']})
            LOG.error(message)
            raise exception.InvalidShare(reason=message)

    def _delete_nfs_share(self, share):
        """Is called to remove nfs share."""
        share_id = self._isilon_api.lookup_nfs_export(
            self._root_dir + '/' + share['name'])

        if share_id is None:
            # Deleting a share that is already gone is logged, not fatal.
            lw = _LW('Attempted to delete NFS Share "%s", but the share does '
                     'not appear to exist.')
            LOG.warning(lw, share['name'])
        else:
            # attempt to delete the share
            export_deleted = self._isilon_api.delete_nfs_share(share_id)
            if not export_deleted:
                message = _('Error deleting NFS share: %s') % share['name']
                LOG.error(message)
                raise exception.ShareBackendException(msg=message)

    def _delete_cifs_share(self, share):
        """Is called to remove CIFS share."""
        smb_share = self._isilon_api.lookup_smb_share(share['name'])
        if smb_share is None:
            # Deleting a share that is already gone is logged, not fatal.
            lw = _LW('Attempted to delete CIFS Share "%s", but the share does '
                     'not appear to exist.')
            LOG.warning(lw, share['name'])
        else:
            share_deleted = self._isilon_api.delete_smb_share(share['name'])
            if not share_deleted:
                message = _('Error deleting CIFS share: %s') % share['name']
                LOG.error(message)
                raise exception.ShareBackendException(msg=message)

    def delete_snapshot(self, context, snapshot, share_server):
        """Is called to remove snapshot."""
        self._isilon_api.delete_snapshot(snapshot['name'])

    def ensure_share(self, context, share, share_server):
        """Invoked to ensure that share is exported."""

    def extend_share(self, share, new_size, share_server=None):
        """Extends a share."""
        new_quota_size = new_size * units.Gi
        self._isilon_api.quota_set(
            self._get_container_path(share), 'directory', new_quota_size)

    def allow_access(self, context, share, access, share_server):
        """Allow access to the share."""
        if share['share_proto'] == 'NFS':
            self._nfs_allow_access(share, access)
        elif share['share_proto'] == 'CIFS':
            self._cifs_allow_access(share, access)
        else:
            message = _(
                'Unsupported share protocol: %s. Only "NFS" and '
                '"CIFS" are currently supported share protocols.') % share[
                'share_proto']
            LOG.error(message)
            raise exception.InvalidShare(reason=message)

    def _nfs_allow_access(self, share, access):
        """Allow access to nfs share."""
        access_type = access['access_type']
        if access_type != 'ip':
            # BUG FIX: the two string fragments previously concatenated to
            # "the NFSprotocol." -- a space was missing at the join.
            message = _('Only "ip" access type allowed for the NFS '
                        'protocol.')
            LOG.error(message)
            raise exception.InvalidShareAccess(reason=message)

        export_path = self._get_container_path(share)
        access_ip = access['access_to']
        access_level = access['access_level']
        share_id = self._isilon_api.lookup_nfs_export(export_path)

        share_access_group = 'clients'
        if access_level == const.ACCESS_LEVEL_RO:
            share_access_group = 'read_only_clients'

        # Get current allowed clients
        export = self._get_existing_nfs_export(share_id)
        current_clients = export[share_access_group]

        # Format of ips could be '10.0.0.2', or '10.0.0.2, 10.0.0.0/24'
        ips = [access_ip]
        ips.extend(current_clients)
        export_params = {share_access_group: ips}
        url = '{0}/platform/1/protocols/nfs/exports/{1}'.format(
            self._server_url, share_id)
        resp = self._isilon_api.request('PUT', url, data=export_params)
        resp.raise_for_status()

    def _cifs_allow_access(self, share, access):
        """Dispatch a CIFS allow-access request by access type."""
        access_type = access['access_type']
        access_to = access['access_to']
        access_level = access['access_level']
        if access_type == 'ip':
            access_ip = access['access_to']
            self._cifs_allow_access_ip(access_ip, share, access_level)
        elif access_type == 'user':
            self._cifs_allow_access_user(access_to, share, access_level)
        else:
            message = _('Only "ip" and "user" access types allowed for '
                        'CIFS protocol.')
            LOG.error(message)
            raise exception.InvalidShareAccess(reason=message)

    def _cifs_allow_access_ip(self, ip, share, access_level):
        """Allow ip-based access to a CIFS share (RW only)."""
        if access_level == const.ACCESS_LEVEL_RO:
            message = _('Only RW Access allowed for CIFS Protocol when using '
                        'the "ip" access type.')
            LOG.error(message)
            raise exception.InvalidShareAccess(reason=message)

        allowed_ip = 'allow:' + ip
        smb_share = self._isilon_api.lookup_smb_share(share['name'])
        host_acl = smb_share['host_acl']
        if allowed_ip not in host_acl:
            host_acl.append(allowed_ip)
            data = {'host_acl': host_acl}
            url = ('{0}/platform/1/protocols/smb/shares/{1}'
                   .format(self._server_url, smb_share['name']))
            r = self._isilon_api.request('PUT', url, data=data)
            r.raise_for_status()

    def _cifs_allow_access_user(self, user, share, access_level):
        """Allow user-based access to a CIFS share at the given level."""
        if access_level == const.ACCESS_LEVEL_RW:
            smb_permission = isilon_api.SmbPermission.rw
        elif access_level == const.ACCESS_LEVEL_RO:
            smb_permission = isilon_api.SmbPermission.ro
        else:
            message = _('Only "RW" and "RO" access levels are supported.')
            LOG.error(message)
            raise exception.InvalidShareAccess(reason=message)

        self._isilon_api.smb_permissions_add(share['name'], user,
                                             smb_permission)

    def deny_access(self, context, share, access, share_server):
        """Deny access to the share."""
        if share['share_proto'] == 'NFS':
            self._nfs_deny_access(share, access)
        elif share['share_proto'] == 'CIFS':
            self._cifs_deny_access(share, access)

    def _nfs_deny_access(self, share, access):
        """Deny access to nfs share."""
        if access['access_type'] != 'ip':
            return

        denied_ip = access['access_to']
        access_level = access['access_level']
        share_access_group = 'clients'
        if access_level == const.ACCESS_LEVEL_RO:
            share_access_group = 'read_only_clients'

        # Get list of currently allowed client ips
        export_id = self._isilon_api.lookup_nfs_export(
            self._get_container_path(share))
        if export_id is None:
            message = _('Share %s should have been created, but was not '
                        'found.') % share['name']
            LOG.error(message)
            raise exception.ShareBackendException(msg=message)
        export = self._get_existing_nfs_export(export_id)
        try:
            clients = export[share_access_group]
        except KeyError:
            message = (_('Export %(export_name)s should have contained the '
                         'JSON key %(json_key)s, but this key was not found.')
                       % {'export_name': share['name'],
                          'json_key': share_access_group})
            LOG.error(message)
            raise exception.ShareBackendException(msg=message)
        allowed_ips = set(clients)
        # Idiom fix: use the `in` operator rather than __contains__().
        if denied_ip in allowed_ips:
            allowed_ips.remove(denied_ip)
            data = {share_access_group: list(allowed_ips)}
            url = ('{0}/platform/1/protocols/nfs/exports/{1}'
                   .format(self._server_url, six.text_type(export_id)))
            r = self._isilon_api.request('PUT', url, data=data)
            r.raise_for_status()

    def _get_existing_nfs_export(self, export_id):
        """Fetch an NFS export by id, raising if it does not exist."""
        export = self._isilon_api.get_nfs_export(export_id)
        if export is None:
            message = _('NFS share with export id %d should have been '
                        'created, but was not found.') % export_id
            LOG.error(message)
            raise exception.ShareBackendException(msg=message)
        return export

    def _cifs_deny_access(self, share, access):
        """Dispatch a CIFS deny-access request by access type."""
        access_type = access['access_type']
        if access_type == 'ip':
            self._cifs_deny_access_ip(access['access_to'], share)
        elif access_type == 'user':
            self._cifs_deny_access_user(share, access)
        else:
            # Unknown types are logged but not fatal on the deny path.
            message = _('Access type for CIFS deny access request was '
                        '"%(access_type)s". Only "user" and "ip" access types '
                        'are supported for CIFS protocol access.') % {
                'access_type': access_type}
            LOG.warning(message)

    def _cifs_deny_access_ip(self, denied_ip, share):
        """Deny access to cifs share."""
        share_json = self._isilon_api.lookup_smb_share(share['name'])
        host_acl_list = share_json['host_acl']
        allow_ip = 'allow:' + denied_ip
        if allow_ip in host_acl_list:
            host_acl_list.remove(allow_ip)
            share_params = {"host_acl": host_acl_list}
            url = ('{0}/platform/1/protocols/smb/shares/{1}'
                   .format(self._server_url, share['name']))
            resp = self._isilon_api.request('PUT', url, data=share_params)
            resp.raise_for_status()

    def _cifs_deny_access_user(self, share, access):
        """Remove a user's SMB permissions from a CIFS share."""
        self._isilon_api.smb_permissions_remove(share['name'], access[
            'access_to'])

    def check_for_setup_error(self):
        """Check for setup error."""

    def connect(self, emc_share_driver, context):
        """Connect to an Isilon cluster."""
        self._server = emc_share_driver.configuration.safe_get(
            "emc_nas_server")
        self._port = (
            int(emc_share_driver.configuration.safe_get("emc_nas_server_port"))
        )
        self._server_url = ('https://' + self._server + ':' +
                            six.text_type(self._port))
        self._username = emc_share_driver.configuration.safe_get(
            "emc_nas_login")
        self._password = emc_share_driver.configuration.safe_get(
            "emc_nas_password")
        self._root_dir = emc_share_driver.configuration.safe_get(
            "emc_nas_root_dir")
        # TODO(Shaun Edwards): make verify ssl a config variable?
        self._verify_ssl_cert = False
        self._isilon_api = self._isilon_api_class(self._server_url, auth=(
            self._username, self._password),
            verify_ssl_cert=self._verify_ssl_cert)
        if not self._isilon_api.is_path_existent(self._root_dir):
            self._isilon_api.create_directory(self._root_dir, recursive=True)

    def update_share_stats(self, stats_dict):
        """TODO."""
        # TODO(Shaun Edwards): query capacity, set storage_protocol,
        # QoS support?
        stats_dict['driver_version'] = VERSION

    def get_network_allocations_number(self):
        """Returns number of network allocations for creating VIFs."""
        # TODO(Shaun Edwards)
        return 0

    def setup_server(self, network_info, metadata=None):
        """Set up and configures share server with given network parameters."""
        # TODO(Shaun Edwards): Look into supporting share servers

    def teardown_server(self, server_details, security_services=None):
        """Teardown share server."""
        # TODO(Shaun Edwards): Look into supporting share servers
| |
from __future__ import print_function
try:
from queue import Empty # Python 3
except ImportError:
from Queue import Empty # Python 2
import time
import io
from ipykernel.kernelbase import Kernel
from datetime import datetime
import os
import os.path
import tempfile
from jupyter_client.manager import KernelManager
from jupyter_client.ioloop import IOLoopKernelManager
from jupyter_core.application import JupyterApp
import re
import json
from threading import (Thread, Event, Timer)
try:
from os import getcwdu as getcwd # Python 2
except ImportError:
from os import getcwd # Python 3
import pickle
import dateutil
from .log import ExecutionInfo
from .utils import run_sync
from traitlets.config.configurable import LoggingConfigurable, MultipleInstanceError
from traitlets import (
Unicode, List, default
)
from ipython_genutils import py3compat
from ipython_genutils.py3compat import PY3
from types import MethodType
from fluent import sender
# Keys recognized in the execution environment / .lc_wrapper config file:
# summary line-count spec "start:header:exec:footer"
SUMMARIZE_KEY = 'lc_wrapper'
# keyword pattern source: 'file:<name>', 'file:default', or an inline regex
IGNORE_SUMMARIZE_KEY = 'lc_wrapper_regex'
# 'on'/'off' override of the per-cell '!!' summary marker
FORCE_SUMMARIZE_KEY = 'lc_wrapper_force'
# regex whose matches are replaced by asterisks in output
MASKING_KEY = 'lc_wrapper_masking_pattern'
# 'on' (default) to also mask the text written to log files
LOG_MASKING_KEY = 'lc_wrapper_mask_log'
# Per-notebook / per-user file holding one keyword regex per line.
IPYTHON_DEFAULT_PATTERN_FILE = '.lc_wrapper_regex.txt'
# Built-in keyword patterns (one regex per line) written to the default
# pattern file when none exists yet.
IPYTHON_DEFAULT_PATTERN = '''ERROR|error|Error|Panic|panic|Invalid|invalid|Warning|warning|Bad|bad
FAIL|Fail|fail
(Not|not) (Found|found)
(Device)? not ready
out of (Memory|memory)
interrupt(ed)?|abort(ed)?|stop(ped)?
insecure|inaccessible|Forbidden|forbidden|Denied|denied
Unauthorised|unauthorised|Unauthorized|unauthorized
(No|no|Low|low) (.+ )?(Capacity|capacity|Space|space)
has (encountered|stopped)
is not
initialize(d)?|initialise(d)?|start(ed)?|restart(ed)?|spawn(ed)?|complete(d)?
finish(ed)?|resume(d)?|begin|attach(ed)?|detach(ed)?|reboot(ed)?|suspend(ed)?
done|terminate(d)?|open(ed)?|close(d)?|(dis)?connect(ed)?|establish(ed)?
allocate(d)?|assign(ed)?|load(ed)?|(in|re)?activate(d)?|block(ed)?|kill(ed)?
refuse(d)?|insufficient|lack
link(ed)? (up|down)'''
class ChannelReaderThread(Thread, LoggingConfigurable):
    """Pump messages from one wrapped-kernel channel to the frontend.

    One daemon thread is started per proxied channel ('iopub', 'stdin').
    Each message received from the wrapped kernel's client is re-sent on
    the wrapper kernel's matching ZMQ stream, after the wrapper has had
    a chance to rewrite its content.
    """
    # Set by stop(); checked after every receive attempt so the thread
    # exits within one poll timeout (0.2 s).
    _exiting = False
    def __init__(self, kernel, client, stream, session, channel, **kwargs):
        Thread.__init__(self, **kwargs)
        LoggingConfigurable.__init__(self, **kwargs)
        self.daemon = True
        self.channel_name = channel
        self.channel = getattr(client, channel + "_channel")
        self.kernel = kernel
        self.client = client
        self.stream = stream
        self.session = session
        self.log.debug("init ChannelReaderThread: channel_name=%s",
                       self.channel_name)
    def run(self):
        """Receive-and-forward loop; runs until stop() is called."""
        self.log.debug("start ChannelReaderThread: channel_name=%s",
                       self.channel_name)
        get_msg = getattr(self.client, 'get_%s_msg' % self.channel_name)
        while True:
            try:
                msg = get_msg(timeout=0.2)
                self.log.debug("Received %s message: %s",
                               self.channel_name, str(msg))
                msg_type = msg['msg_type']
                idle = False
                status_msg = False
                if self.channel_name == 'iopub':
                    content = msg['content']
                    if msg_type == 'status':
                        status_msg = True
                        if content['execution_state'] == 'idle':
                            # Remember whose request went idle so the kernel
                            # can match it in _wait_for_idle().
                            self.kernel.idle_parent_header = msg['parent_header']
                            idle = True
                if msg['parent_header']['msg_type'] == 'shutdown_request':
                    continue
                msg_id = msg['parent_header']['msg_id']
                # Map the wrapped kernel's request id back to the original
                # frontend request header saved by the shell handler.
                parent_header = self.kernel.parent_headers.get(msg_id)
                self.log.debug("parent_header: %s", str(parent_header))
                if self.channel_name == 'iopub':
                    ident = self.kernel._topic(msg_type)
                    msg_content = self.kernel._hook_iopub_msg(parent_header, msg)
                else:
                    ident = self.kernel._parent_ident
                    msg_content = msg['content']
                # Status messages are suppressed: the wrapper kernel emits
                # its own busy/idle status to the frontend.
                if not status_msg:
                    self.session.send(self.stream,
                                      msg_type,
                                      msg_content,
                                      parent=parent_header,
                                      ident=ident,
                                      header=msg['header'],
                                      metadata=msg['metadata'],
                                      buffers=msg['buffers'])
                if self.channel_name == 'stdin' and msg_type == 'input_request':
                    self.log.debug("do input_request")
                    self.input_request()
                if idle:
                    self.kernel.idle_event.set()
                    parent_msg_id = msg['parent_header'].get('msg_id')
                    if parent_msg_id is not None:
                        self.kernel._remove_parent_header(parent_msg_id)
            except Empty as e:
                # Normal poll timeout; loop again (and re-check _exiting).
                pass
            except Exception as e:
                self.log.error(e, exc_info=True)
            finally:
                if self._exiting:
                    break
        self.log.debug("exit ChannelReaderThread: %s", self.channel_name)
    def input_request(self):
        """Relay one input_reply from the frontend to the wrapped kernel."""
        self.log.debug("wait input_reply")
        while True:
            try:
                reply = self._get_msg_from_frontend()
            except Empty:
                # Keep polling unless the user interrupted the execution.
                if self.kernel.keyboard_interrupt:
                    self.log.debug("input_request: interrupted")
                    return
            except Exception:
                self.log.warn("Invalid Message:", exc_info=True)
            else:
                break
        self.log.debug("input_reply: %s", str(reply))
        msg = self.client.session.msg(reply['msg_type'],
                                      content=reply['content'],
                                      parent=reply['parent_header'],
                                      header=reply['header'],
                                      metadata=reply['metadata'])
        self.client.stdin_channel.send(msg)
    def _get_msg_from_frontend(self, timeout=200):
        # ``timeout`` is in milliseconds (zmq poll convention).
        ready = self.stream.poll(timeout)
        if ready:
            return self._recv_from_frontend()
        else:
            raise Empty
    def _recv_from_frontend(self, **kwargs):
        # Deserialize one multipart ZMQ message from the frontend stream.
        msg = self.stream.recv_multipart(**kwargs)
        ident,smsg = self.session.feed_identities(msg)
        return self.session.deserialize(smsg)
    def stop(self):
        """Ask the reader loop to exit and wait for the thread to finish."""
        if self.is_alive():
            self._exiting = True
            self.join()
class BufferedKernelBase(Kernel):
blocking_msg_types = [
'execute_request',
'history_request',
'complete_request',
'inspect_request',
'kernel_info_request',
'comm_info_request',
'shutdown_request'
]
proxy_channles = ['iopub', 'stdin']
threads = {}
parent_headers = {}
idle_event = Event()
idle_parent_header = None
keyboard_interrupt = False
execute_request_msg_id = None
log_file_object = None
data_dir = Unicode()
@default('data_dir')
def _data_dir_default(self):
app = None
try:
if JupyterApp.initialized():
app = JupyterApp.instance()
except MultipleInstanceError:
pass
if app is None:
# create an app, without the global instance
app = JupyterApp()
app.initialize(argv=[])
return app.data_dir
    server_signature_file = Unicode(
        help="""The file where the server signature is stored."""
    ).tag(config=True)
    @default('server_signature_file')
    def _server_signature_file_default(self):
        # An environment override (set by lc_nblineage) takes precedence.
        if 'lc_nblineage_server_signature_path' in os.environ:
            return os.environ['lc_nblineage_server_signature_path']
        if not self.data_dir:
            return ''
        return os.path.join(self.data_dir, 'server_signature')
    # Candidate keyword-pattern file locations: notebook dir, then home.
    keyword_pattern_file_paths = List()
    @default('keyword_pattern_file_paths')
    def _keyword_pattern_file_paths_default(self):
        return [
            os.path.join(self.get_notebook_path(), IPYTHON_DEFAULT_PATTERN_FILE),
            os.path.join(os.path.expanduser('~/'), IPYTHON_DEFAULT_PATTERN_FILE)
        ]
    # Candidate log output directories, probed for writability in order.
    log_dirs = List()
    @default('log_dirs')
    def _log_dirs_default(self):
        return [
            os.path.join(self.get_notebook_path(), '.log'),
            os.path.expanduser('~/.log')
        ]
    # Candidate .lc_wrapper config file locations.
    configfile_paths = List()
    @default('configfile_paths')
    def _configfile_paths_default(self):
        return [
            os.path.join(self.get_notebook_path(), '.lc_wrapper'),
            os.path.join(os.path.expanduser('~/'), '.lc_wrapper')
        ]
    def __init__(self, **kwargs):
        """Initialize the wrapper kernel and start the wrapped kernel.

        Fluentd log forwarding is enabled only when
        ``lc_wrapper_fluentd_host`` is present in the environment; port
        and tag fall back to defaults.
        """
        Kernel.__init__(self, **kwargs)
        if 'lc_wrapper_fluentd_host' in os.environ:
            fluentd_host = os.environ['lc_wrapper_fluentd_host']
            fluentd_port = int(os.environ.get('lc_wrapper_fluentd_port', '24224'))
            fluentd_tag = os.environ.get('lc_wrapper_fluentd_tag', 'lc_wrapper')
            self.sender = sender.FluentSender(fluentd_tag,
                                              host=fluentd_host,
                                              port=fluentd_port)
            self.log.info('lc_wrapper: Enabled fluent logger: host=%s, port=%s, tag=%s',
                          fluentd_host, fluentd_port, fluentd_tag)
        else:
            self.sender = None
        self._init_message_handler()
        self.start_ipython_kernel()
    def _init_message_handler(self):
        """Replace ipykernel's shell handlers with a proxying handler.

        The handler forwards each shell request to the wrapped kernel,
        relays the (possibly rewritten) reply to the frontend, and for
        blocking request types waits until the wrapped kernel goes idle.
        kernel_info_request and shutdown_request keep their original
        ipykernel handlers.
        """
        def handler(self, stream, ident, parent):
            self.log.debug("Received shell message: %s", str(parent))
            msg_type = parent['msg_type']
            content = parent['content']
            self._hook_request_msg(parent)
            self.idle_event.clear()
            self.keyboard_interrupt = False
            # Re-issue the request on the wrapped kernel's shell channel
            # under a fresh msg_id, remembering the original header.
            msg = self.kc.session.msg(msg_type, content)
            msgid = msg['header']['msg_id']
            self.log.debug("save parent_header: %s => %s", msgid, str(parent['header']))
            self.parent_headers[msgid] = parent['header']
            self.kc.shell_channel.send(msg)
            reply_msg = None
            if msg_type in self.blocking_msg_types:
                while True:
                    try:
                        reply_msg = self.kc._recv_reply(msgid, timeout=None)
                        break
                    except KeyboardInterrupt:
                        self.log.debug("KeyboardInterrupt", exc_info=True)
                        # propagate SIGINT to wrapped kernel
                        self.km.interrupt_kernel()
                        self.keyboard_interrupt = True
                        # Fallback: force-close log files if the wrapped
                        # kernel does not interrupt within 5.0 sec.
                        self.timer = Timer(5.0, self.close_files)
                        self.log.debug('>>>>> close files: timer fired')
                        self.timer.start()
                reply_msg_content = self._hook_reply_msg(reply_msg)
                self.log.debug('reply: %s', reply_msg)
                reply_msg = self.session.send(stream,
                                              reply_msg['msg_type'],
                                              reply_msg_content,
                                              parent, ident,
                                              header=reply_msg['header'],
                                              metadata=reply_msg['metadata'],
                                              buffers=reply_msg['buffers'])
                self._post_send_reply_msg(parent, reply_msg)
                self._wait_for_idle(msgid)
            self._post_wait_for_idle(parent, reply_msg)
        for msg_type in self.msg_types:
            if msg_type == 'kernel_info_request':
                continue
            if msg_type == 'shutdown_request':
                continue
            self.log.debug('override shell message handler: msg_type=%s', msg_type)
            # Bind the closure as a method; Python 2 MethodType needs the class.
            if PY3:
                setattr(self, msg_type, MethodType(handler, self))
            else:
                setattr(self, msg_type, MethodType(handler, self, type(self)))
            self.shell_handlers[msg_type] = getattr(self, msg_type)
        # Comm messages are proxied with the same handler.
        comm_msg_types = ['comm_open', 'comm_msg', 'comm_close']
        for msg_type in comm_msg_types:
            self.log.debug('init shell comm message handler: msg_type=%s', msg_type)
            if PY3:
                setattr(self, msg_type, MethodType(handler, self))
            else:
                setattr(self, msg_type, MethodType(handler, self, type(self)))
            self.shell_handlers[msg_type] = getattr(self, msg_type)
    def start_ipython_kernel(self):
        """Launch and connect to the wrapped kernel, then set up proxying.

        Starts one ChannelReaderThread per proxied channel, chooses a
        writable log directory and ensures a default keyword pattern
        file exists.
        """
        kernel_name = self._get_wrapped_kernel_name()
        self.km = KernelManager(kernel_name=kernel_name,
                                client_class='jupyter_client.blocking.BlockingKernelClient')
        self.log.debug('kernel_manager: %s', str(self.km))
        self.log.info('start wrapped kernel: %s', kernel_name)
        self.km.start_kernel()
        self.kc = self.km.client()
        self.log.debug('kernel_client: %s', str(self.kc))
        self.log.debug('start_channels')
        self.kc.start_channels()
        try:
            self.log.debug('wait for ready of wrapped kernel')
            self.kc.wait_for_ready(timeout=None)
        except RuntimeError:
            self.kc.stop_channels()
            self.km.shutdown_kernel()
            raise
        for channel in self.proxy_channles:
            stream = getattr(self, channel + '_socket')
            thread = ChannelReaderThread(self, self.kc, stream, self.session, channel)
            thread.start()
            self.threads[channel] = thread
        # NOTE(review): if no candidate directory is writable, self.log_path
        # is never assigned and the debug call below raises AttributeError —
        # confirm whether an explicit error is preferable.
        for log_dir in self.log_dirs:
            if self._is_writable_dir(log_dir):
                self.log_path = log_dir
                break
        self.log.debug('log output directory: %s', self.log_path)
        if self._find_default_keyword_pattern_file() is None:
            self.log.info('default keyword pattern file "%s" not found', IPYTHON_DEFAULT_PATTERN_FILE)
            try:
                self._generate_default_keyword_pattern_file()
            except Exception as e:
                self.log.exception("failed to generate default keyword pattern file: %s", e)
        self.exec_info = None
        self.notebook_path = self.get_notebook_path()
        self.log.debug('notebook_path: %s', self.notebook_path)
def _is_writable_dir(self, path):
temp_dir = None
try:
if not os.path.exists(path):
os.makedirs(path)
temp_dir = tempfile.mkdtemp(dir=path)
return True
except (OSError, IOError) as e:
self.log.debug("_is_writable_dir: %s", e)
return False
finally:
if temp_dir is not None:
os.rmdir(temp_dir)
def _get_wrapped_kernel_name(self, km):
raise NotImplementedError()
def _remove_parent_header(self, msg_id):
if msg_id in self.parent_headers:
parent_header = self.parent_headers[msg_id]
self.log.debug("remove parent_header: %s => %s", msg_id, str(parent_header))
del self.parent_headers[msg_id]
def _hook_request_msg(self, parent):
msg_type = parent['msg_type']
if msg_type == 'execute_request':
self._hook_execute_request_msg(parent)
def _hook_execute_request_msg(self, parent):
try:
content = parent[u'content']
code = py3compat.cast_unicode_py2(content[u'code'])
silent = content[u'silent']
allow_stdin = content.get('allow_stdin', False)
except:
self.log.error("Got bad msg: ")
self.log.error("%s", parent)
return
self.execute_request_msg_id = parent['header']['msg_id']
if not silent:
self.execution_count += 1
cell_full_id = self._get_cell_id(parent)
if cell_full_id is not None:
cell_uuid, _ = self._parse_cell_id(cell_full_id)
self.log_history_file_path = os.path.join(self.log_path,
cell_uuid,
cell_uuid + u'.json')
else:
self.log_history_file_path = None
self.log_history_id = cell_full_id
self.log_history_data = self._read_log_history_file()
notebook_data = self._get_notebook_data(parent)
self.exec_info = ExecutionInfo(code, self.get_server_signature(), notebook_data)
if not silent:
env = self._get_config()
self.summarize_on, new_code = self.is_summarize_on(code, env)
self._init_default_config()
self._start_log()
if self.summarize_on:
self._start_summarize()
self._load_env(env)
if not self.log_history_id is None:
meme = {'lc_cell_meme': {'current': self.log_history_id}}
self.log_buff_append(u'{}\n----\n'.format(json.dumps(meme)))
self.log_buff_append(u'{}\n----\n'.format(code)) # code
self._log_buff_flush()
self.log_buff_append(self.exec_info.to_logfile_header() + u'----\n')
content[u'code'] = new_code
self._allow_stdin = allow_stdin
def _hook_reply_msg(self, reply_msg):
if reply_msg['msg_type'] == 'execute_reply':
return self._hook_execute_reply_msg(reply_msg)
return reply_msg['content']
    def _hook_execute_reply_msg(self, reply):
        """Augment an execute_reply with our execution count and log path."""
        # Cancel the forced-close timer armed on KeyboardInterrupt: the
        # wrapped kernel did reply, so files will be closed normally.
        if hasattr(self, "timer"):
            self.timer.cancel()
            self.log.debug('>>>>> close files: timer cancelled')
        content = reply['content']
        content['execution_count'] = self.execution_count
        content['lc_wrapper'] = {
            'log_path': self.file_full_path
        }
        self.exec_info.execute_reply_status = content['status']
        return content
    def _post_send_reply_msg(self, parent, reply_msg):
        """Mirror ipykernel's stop-on-error behaviour after replying."""
        msg_type = parent['msg_type']
        if msg_type == 'execute_request':
            content = parent['content']
            silent = content['silent']
            stop_on_error = content.get('stop_on_error', True)
            # Abort queued requests when a non-silent execution errored.
            if not silent and reply_msg['content']['status'] == u'error' and stop_on_error:
                run_sync(self._abort_queues())
def _post_wait_for_idle(self, parent, reply_msg):
if reply_msg is None:
return
if reply_msg['msg_type'] == 'execute_reply':
self.log.debug('flushing stdout stream')
self._send_last_stdout_stream_text()
self.log.debug('flushed stdout stream')
self.execute_request_msg_id = None
def _hook_iopub_msg(self, parent_header, msg):
msg_id = parent_header['msg_id']
content = msg['content']
# replace msg_id in the content
self._replace_msg_id(msg_id, msg['parent_header']['msg_id'], content)
if self.execute_request_msg_id == msg_id:
return self._output_hook(msg)
return content
def _replace_msg_id(self, msg_id, wrapped_msg_id, content):
for k, v in content.items():
if isinstance(v, dict):
self._replace_msg_id(msg_id, wrapped_msg_id, v)
elif v == wrapped_msg_id:
content[k] = msg_id
self.log.debug('replace msg_id in content: %s => %s',
wrapped_msg_id, msg_id)
def _write_log(self, msg):
if not msg is None:
self.log_file_object.write(msg)
self.exec_info.file_size = self.log_file_object.tell()
    def open_log_file(self, path):
        """Open a fresh timestamped log file under ``path``/YYYYMMDD/."""
        self.log.debug('>>>>> open_log_file')
        now = datetime.now(dateutil.tz.tzlocal())
        path = os.path.join(path, now.strftime("%Y%m%d"))
        if not os.path.exists(path):
            os.makedirs(path)
        # e.g. 20200131-235959-0123.log (millisecond suffix).
        file_name = now.strftime("%Y%m%d-%H%M%S") + "-%04d" % (now.microsecond // 1000)
        self.file_full_path = os.path.join(path, file_name + u'.log')
        self.exec_info.log_path = self.file_full_path
        self.log_file_object = io.open(self.file_full_path, "a", encoding='utf-8')
        self.log.debug(self.file_full_path)
        self.log.debug(self.log_file_object)
    def close_log_file(self):
        """Close the cell log file and forward it to fluentd.

        Idempotent: does nothing when the file is absent or already
        closed; always resets ``self.log_file_object`` to None.
        """
        self.log.debug('>>>>> close_log_file')
        if self.log_file_object is None:
            self.log.debug('>>>>> close_log_file: not executed because self.log_file_object is None')
            return
        if not self.log_file_object.closed:
            self.log.debug('>>>>> log file closed')
            self.log_file_object.close()
            self.send_fluent_log()
        else:
            self.log.debug('>>>>> close_log_file: not executed because self.log_file_object is already closed')
        self.log.debug('close_log_file: self.log_file_object = None')
        self.log_file_object = None
def send_fluent_log(self):
if self.sender is None:
return
self.log.debug('>>>>> send_fluent_log')
record = {}
with io.open(self.exec_info.log_path, 'r') as f:
record['log'] = f.read()
self.sender.emit(None, record)
self.log.info('lc_wrapper: send_fluent_log: cell_meme=%s, uid=%s, gid=%s',
self.log_history_id, os.getuid(), os.getgid(), self.get_server_signature())
def get_server_signature(self):
if os.path.exists(self.server_signature_file):
with io.open(self.server_signature_file, 'r') as f:
return f.read()
else:
return None
    def _wait_for_idle(self, msg_id):
        """Block until the wrapped kernel reports idle for ``msg_id``."""
        self.log.debug('waiting for idle: msg_id=%s', msg_id)
        while True:
            self.idle_event.wait()
            if self.idle_parent_header['msg_id'] != msg_id:
                # NOTE(review): the event is not cleared here, so a stale
                # idle notification makes this loop spin (warning repeatedly)
                # until the matching idle arrives — confirm this is intended.
                self.log.warn('unexpected idle message received: expected msg_id=%s, received msg_id=%s',
                              msg_id, self.idle_parent_header['msg_id'])
                continue
            self.log.debug('idle: msg_id=%s', msg_id)
            return
    def get_notebook_path(self):
        """Return the directory notebooks are assumed to live in (the CWD)."""
        return getcwd()
def _find_config_file(self):
for path in self.configfile_paths:
if os.path.exists(path):
return path
return None
def _get_config(self):
env = os.environ
config_path = self._find_config_file()
if config_path is None:
return env
line_pattern = re.compile(r'(\S+)=(".*?"|\S+)')
config = {}
with io.open(config_path, 'r', encoding='utf-8') as f:
for l in f.readlines():
l = l.strip()
if len(l) == 0 or l.startswith('#'):
continue
m = line_pattern.match(l)
if m:
config[m.group(1)] = m.group(2)
else:
self.log.warning('Unexpected line: {} at {}'.format(l, config_path))
for k, v in env.items():
config[k] = v
return config
    def send_clear_content_msg(self):
        """Ask the frontend to clear this cell's output.

        ``wait=True`` defers the clear until the next output arrives,
        which avoids flicker while the summary is being rewritten.
        """
        clear_content = {'wait': True}
        self.session.send(self.iopub_socket, 'clear_output', clear_content, self._parent_header,
                          ident=None, buffers=None, track=False, header=None, metadata=None)
def _load_env(self, env):
summarize = env.get(SUMMARIZE_KEY, '')
self.log.debug("lc_wrapper = " + summarize)
summarize_pattern = re.compile(r'^([0-9]*):([0-9]*):([0-9]*):([0-9]*)$')
summarize_params = summarize_pattern.match(summarize)
if summarize_params is not None and len(summarize_params.group(1)) != 0:
self.summarize_start_lines = int(summarize_params.group(1))
if summarize_params is not None and len(summarize_params.group(2)) != 0:
self.summarize_header_lines = int(summarize_params.group(2))
if summarize_params is not None and len(summarize_params.group(3)) != 0:
self.summarize_exec_lines = int(summarize_params.group(3))
if summarize_params is not None and len(summarize_params.group(4)) != 0:
self.summarize_footer_lines = int(summarize_params.group(4))
self.summarize_start_lines = max(self.summarize_start_lines,
self.summarize_header_lines + \
self.summarize_footer_lines + 1)
self.log_history_data = self._read_log_history_file()
if MASKING_KEY in env:
self.masking_pattern = re.compile(env.get(MASKING_KEY))
else:
self.masking_pattern = None
if LOG_MASKING_KEY in env:
self.log_mask = env.get(LOG_MASKING_KEY)
else:
self.log_mask = 'on'
self.repatter = []
text = env.get(IGNORE_SUMMARIZE_KEY, 'file:default')
if text is None or len(text) == 0:
pass
elif 'file:' in text:
file_name = text[text.rfind('find:')+6:].strip()
if file_name == 'default':
file_path = self._find_default_keyword_pattern_file()
else:
file_path = os.path.join(self.notebook_path, file_name)
if file_path is None:
self.keyword_buff_append(u'error : {} Not found'.format(IPYTHON_DEFAULT_PATTERN_FILE))
self.log.warning('lc_wrapper_regex: %s Not found', IPYTHON_DEFAULT_PATTERN_FILE)
elif os.path.exists(file_path):
try:
patterns = self._read_keyword_pattern_file(file_path)
for ptxt in patterns:
self.repatter.append(re.compile(ptxt))
except Exception as e:
self.keyword_buff_append(u'error : ' + str(e))
self.log.exception("lc_wrapper_regex: %s", e)
else:
self.keyword_buff_append(u'error : {} Not found'.format(file_path))
self.log.warning('lc_wrapper_regex: %s Not found', file_path)
else:
try:
self.repatter.append(re.compile(text))
except Exception as e:
self.keyword_buff_append(u'error : ' + str(e))
self.log.exception("lc_wrapper_regex: %s", e)
def _find_default_keyword_pattern_file(self):
for path in self.keyword_pattern_file_paths:
if os.path.exists(path):
return path
return None
def _read_keyword_pattern_file(self, filename):
with open(filename, 'r') as file:
patterns = file.readlines()
patterns = [x.strip() for x in patterns if len(x.strip()) > 0]
self.log.debug('patterns :')
for patt in patterns:
self.log.debug(patt)
return patterns
def _generate_default_keyword_pattern_file(self):
error = None
self.log.info('generate default keyword pattern file')
for path in self.keyword_pattern_file_paths:
if not os.path.exists(path):
try:
with open(path, 'w') as f:
f.write(IPYTHON_DEFAULT_PATTERN)
self.log.info('generated default keyword pattern file: %s', path)
return
except Exception as e:
self.log.debug('_generate_default_keyword_pattern_file: %s', str(e))
error = e
if error is not None:
raise error
def is_summarize_on(self, code, env):
force = None
if FORCE_SUMMARIZE_KEY in env:
force_text = env[FORCE_SUMMARIZE_KEY].strip().lower()
if force_text == 'on':
force = True
elif force_text == 'off':
force = False
regx = r'^\s*!!'
m = re.match(regx, code, re.M)
if m:
return (force if force is not None else True,
code[m.end():])
else:
return (force if force is not None else False,
code)
def _log_buff_flush(self, force=False):
if force or len(self.log_buff) > 100:
self._write_log(u''.join(self.log_buff))
del self.log_buff[:]
def log_buff_append(self, text=None):
if self.block_messages:
return
if not text is None:
if isinstance(text, list):
self.log_buff.extend(text)
else:
self.log_buff.append(text)
def keyword_buff_append(self, text, highlight=True):
if isinstance(text, list):
self.keyword_buff.extend([u'\033[0;31m{}\033[0m'.format(t)
if highlight else t for t in text])
else:
self.keyword_buff.append(u'\033[0;31m{}\033[0m'.format(text)
if highlight else text)
def display_keyword_buff(self):
if len(self.keyword_buff) == 0:
return ''
stream_text = u'...\n'
stream_text += u'\n'.join(self.keyword_buff[:self.summarize_header_lines * 2]) + '\n'
if len(self.keyword_buff) <= self.summarize_header_lines * 2:
return stream_text
msg = u'Matched lines exceed maximum number of view ({})' \
.format(self.summarize_header_lines * 2)
stream_text += u'\033[0;31m{}\033[0m\n'.format(msg)
return stream_text
    def highlight_keywords(self, text):
        """Return ``text`` with every keyword match wrapped in red ANSI
        color, or None when no configured pattern matches.

        Repeatedly takes the left-most match across all patterns; on a
        tie the longest matched literal wins, then scanning resumes
        after it.
        """
        matched = [p.search(text) for p in self.repatter]
        matched = [m for m in matched if m is not None]
        if len(matched) == 0:
            return None
        remain = text
        result = None
        while len(matched) > 0:
            left = min([m.start() for m in matched])
            if result is None:
                result = remain[:left]
            else:
                result += remain[:left]
            # Prefer the longest keyword among those starting left-most.
            keywords = [m.group() for m in matched if m.start() == left]
            keyword = sorted(keywords, key=lambda s: len(s))[-1]
            result += u'\033[0;31m{}\033[0m'.format(keyword)
            remain = remain[left + len(keyword):]
            matched = [p.search(remain) for p in self.repatter]
            matched = [m for m in matched if m is not None]
        return result + remain
def _read_log_history_file(self):
if self.log_history_file_path is not None and \
os.path.exists(self.log_history_file_path):
with open(self.log_history_file_path, 'r') as f:
data = json.load(f)
return data
else:
return []
    def _write_log_history_file(self, data):
        """Append this execution's record to the per-cell history file.

        Also symlinks the day-stamped log file into the per-cell history
        directory.  The history path is cleared afterwards so each
        execution is recorded at most once.
        """
        if self.log_history_file_path is None:
            self.log.debug('Skipped to save log history')
            return
        data.append(self.exec_info.to_log())
        pathdir = os.path.dirname(self.log_history_file_path)
        if not os.path.exists(pathdir):
            os.makedirs(pathdir)
        # NOTE(review): os.symlink raises if the link already exists (and is
        # unavailable on some platforms) — confirm name collisions between
        # millisecond-stamped log files cannot occur here.
        log_full_dir, log_filename = os.path.split(self.file_full_path)
        log_full_dir, log_dirname = os.path.split(log_full_dir)
        os.symlink(os.path.join('..', log_dirname, log_filename),
                   os.path.join(pathdir, os.path.basename(self.file_full_path)))
        with open(self.log_history_file_path, 'w') as f:
            json.dump(data, f)
        self.log.debug('Log history saved: {}'.format(self.log_history_file_path))
        self.log_history_file_path = None
    def close_files(self):
        """Flush buffers, finalize the log file and record history.

        Safe to call repeatedly: subsequent calls are no-ops because
        close_log_file() resets ``self.log_file_object`` to None.
        """
        self.log.debug('>>>>> close_files')
        if self.log_file_object is not None:
            self.exec_info.finished(len(self.keyword_buff))
            self.log_buff_append(u'\n----\n{}----\n'.format(self.exec_info.to_logfile_footer()))
            for result in self.result_files:
                self.log_buff_append(u'result: {}\n'.format(result))
            self.log_buff_append(u'execute_reply_status: {}\n'.format(self.exec_info.execute_reply_status))
            # Block further appends, then force out whatever is buffered.
            self.block_messages = True
            self._log_buff_flush(force=True)
            self.close_log_file()
            #save log file path
            self._write_log_history_file(self.log_history_data)
def _init_default_config(self):
self.summarize_start_lines = 50
self.summarize_header_lines = 20
self.summarize_exec_lines = 1
self.summarize_footer_lines = 20
    def _start_summarize(self):
        """Reset the per-execution summary counters and buffers."""
        self.count = 0
        self.summarize_header_buff = []
        self.summarize_last_buff = []
    def _start_log(self):
        """Reset per-execution log state and open a fresh log file."""
        self.block_messages = False
        self.log_buff = []
        self.keyword_buff = []
        self.result_files = []
        self.file_full_path = None
        self.log_file_object = None
        self.open_log_file(self.log_path)
def _store_result(self, result):
if self.file_full_path is None:
self.log.error('Log file already closed. Skip to store results')
return
log_dir, log_name = os.path.split(self.file_full_path)
log_name_body, _ = os.path.splitext(log_name)
result_file = os.path.join(log_dir,
u'{}-{}.pkl'.format(log_name_body,
len(self.result_files)))
with open(result_file, 'wb') as f:
pickle.dump(result, f)
self.result_files.append(result_file)
    def _store_last_lines(self, content_text_list):
        """Maintain a rolling buffer of the most recent output lines."""
        # save the last few lines
        lines = max(self.summarize_footer_lines, self.summarize_start_lines)
        if len(content_text_list) < lines:
            # Evict the oldest entries to make room for the new chunk.
            if len(content_text_list) + len(self.summarize_last_buff) > lines:
                del self.summarize_last_buff[:len(content_text_list)]
            self.summarize_last_buff.extend(content_text_list)
        else:
            # The new chunk alone fills the buffer; keep only its tail.
            del self.summarize_last_buff[:]
            self.summarize_last_buff.extend(content_text_list[-lines:])
    def _output_hook(self, msg=None):
        """Rewrite iopub output belonging to the current execute request.

        Stream text is masked, logged and (in summary mode) replaced by
        a rolling summary; display/execute/error results are pickled so
        they can be re-sent after the final summary clears the output.
        """
        msg_type = msg['header']['msg_type']
        content = msg['content']
        if msg_type == 'stream':
            if 'ExecutionResult' in content['text']:
                return content
            else:
                masked_text = self._mask_lines(content['text'])
                # Log masked or raw text according to the log_mask policy.
                if self.log_mask == 'on':
                    self.log_buff_append(masked_text)
                else :
                    self.log_buff_append(content['text'])
                self._log_buff_flush()
                content['text'] = masked_text
                content_text_list = content['text'].splitlines(False)    # keepends=False: line breaks stripped
                # save the stderr messages
                if content['name'] == 'stderr':
                    self.keyword_buff_append(content_text_list)
                # save the sentences the keyword matched
                elif not self.repatter is None and len(self.repatter) > 0:
                    for text in content_text_list:
                        matched = self.highlight_keywords(text)
                        if matched is not None:
                            self.keyword_buff_append(matched, highlight=False)
                if self.summarize_on:
                    return self._summarize_stream_output(msg, content, content_text_list)
                return content
        elif msg_type in ('display_data', 'execute_result'):
            execute_result = content.copy()
            execute_result['execution_count'] = self.execution_count
            self._store_result({'msg_type': msg_type, 'content': execute_result})
            return execute_result
        elif msg_type == 'error':
            error_result = content.copy()
            error_result['execution_count'] = self.execution_count
            # Store before masking when logs are unmasked, after masking
            # when masked, so the pickled copy matches the log policy.
            if self.log_mask != 'on':
                self._store_result({'msg_type': msg_type, 'content': error_result})
            for i in range(len(error_result['traceback'])):
                error_result['traceback'][i] = self._mask_lines(error_result['traceback'][i])
            error_result['evalue'] = self._mask_lines(error_result['evalue'])
            if self.log_mask == 'on':
                self._store_result({'msg_type': msg_type, 'content': error_result})
            return error_result
        return content
    def _summarize_stream_output(self, msg, content, lines):
        """Pass output through or replace it with a rolling summary.

        Until ``summarize_start_lines`` lines have been seen the stream
        is forwarded unchanged; afterwards the cell is cleared and shows
        header + matched keywords + the most recent lines only.
        """
        # save the first few lines
        if len(self.summarize_header_buff) < self.summarize_header_lines:
            self.summarize_header_buff.extend(lines)
        self._store_last_lines(lines)
        if self.count < self.summarize_start_lines:
            self.count += len(lines)
            stream_content = {'name': content['name'], 'text': content['text']}
        else:
            self._log_buff_flush()
            self.send_clear_content_msg()
            stream_text = u''
            stream_text += self.exec_info.to_stream() + u'----\n'
            stream_text += u'{}\n'.format('\n'.join(self.summarize_header_buff[:self.summarize_header_lines]))
            stream_text += self.display_keyword_buff()
            stream_text += u'...\n'
            stream_text += u'{}'.format('\n'.join(lines[:self.summarize_exec_lines]))
            stream_content = {'name': 'stdout', 'text': stream_text}
        return stream_content
    def _send_last_stdout_stream_text(self):
        """Finalize log files and, in summary mode, emit the final summary."""
        self.log.debug('_flush_stdout_stream')
        self.close_files()
        if self.summarize_on:
            self._send_last_summarized_stdout_stream_text()
        self.result_files = []
    def _send_last_summarized_stdout_stream_text(self):
        """Replace the cell output with the final summary and re-send results."""
        self.send_clear_content_msg()
        stream_text = u''
        stream_text += self.exec_info.to_stream(len(self.log_history_data)) + u'----\n'
        if self.count < self.summarize_start_lines:
            # Short output: show everything that was buffered.
            stream_text += u'\n'.join(self.summarize_last_buff)
        else:
            stream_text += u'{}\n'.format('\n'.join(self.summarize_header_buff[:self.summarize_header_lines]))
            stream_text += self.display_keyword_buff()
            stream_text += u'...\n'
            stream_text += u'{}'.format('\n'.join(self.summarize_last_buff[-self.summarize_footer_lines:]))
        stream_content = {'name': 'stdout', 'text': stream_text}
        self.send_response(self.iopub_socket, 'stream', stream_content)
        # Send execution results again because the last clear_output wiped them.
        for resultf in self.result_files:
            with open(resultf, 'rb') as f:
                result = pickle.load(f)
            self.session.send(self.iopub_socket,
                              result['msg_type'],
                              result['content'],
                              self._parent_header,
                              ident=None,
                              buffers=None,
                              track=False,
                              header=None,
                              metadata=None)
def _mask_lines(self, string):
if not hasattr(self, 'masking_pattern'):
return string
elif self.masking_pattern is None:
return string
else:
pattern = self.masking_pattern
def asterisks_repl(match):
return len(match[0]) * '*'
return re.sub(pattern, asterisks_repl, string)
def _get_cell_id(self, parent):
if 'content' not in parent:
return None
content = parent['content']
if 'lc_cell_data' not in content:
return None
lc_cell_data = content['lc_cell_data']
if 'lc_cell_meme' not in lc_cell_data:
return None
lc_cell_meme = lc_cell_data['lc_cell_meme']
if 'current' not in lc_cell_meme:
return None
return lc_cell_meme['current']
def _parse_cell_id(self, cell_id):
parts = cell_id.split('-')
return '-'.join(parts[:5]), '-'.join(parts[5:])
def _get_notebook_data(self, parent):
if 'content' not in parent:
return None
content = parent['content']
if 'lc_notebook_data' not in content:
return None
return content['lc_notebook_data']
    def do_shutdown(self, restart):
        """Shut down the wrapped kernel, reader threads and loggers."""
        self.log.debug('>>>>> do_shutdown')
        self.close_files()
        if self.sender is not None:
            self.log.debug('close fluent logger sender')
            self.sender.close()
        self.log.info('stopping wrapped kernel')
        if hasattr(self, "km"):
            self.km.shutdown_kernel(restart=restart)
        for channel, thread in self.threads.items():
            self.log.info('stopping %s ChannelReaderThread', channel)
            thread.stop()
        return {'status': 'ok', 'restart': restart}
class LCWrapperKernelManager(IOLoopKernelManager):
    """Kernel manager for LC_wrapper kernel"""
    def shutdown_kernel(self, now=False, restart=False):
        """Shut down the wrapper kernel after interrupting it first.

        The interrupt plus the 5-second grace period gives the wrapper
        time to close its log files before shutdown proceeds.
        """
        # Stop monitoring for restarting while we shutdown.
        self.stop_restarter()
        self.log.debug("Interrupting the wrapper kernel and its subprocesses")
        self.interrupt_kernel()
        # Grace period for the wrapper to handle the interrupt.
        time.sleep(5.0)
        if now:
            self._kill_kernel()
        else:
            self.request_shutdown(restart=restart)
            # Don't send any additional kernel kill messages immediately, to give
            # the kernel a chance to properly execute shutdown actions;
            # finish_shutdown() waits for the kernel to exit before killing it.
            self.finish_shutdown()
        self.cleanup_resources(restart=restart)
| |
import sys
from peewee import ModelQueryResultWrapper
from peewee import NaiveQueryResultWrapper
from playhouse.tests.base import ModelTestCase
from playhouse.tests.base import skip_test_if
from playhouse.tests.base import test_db
from playhouse.tests.models import *
class TestQueryResultWrapper(ModelTestCase):
requires = [User, Blog, Comment]
    def test_iteration(self):
        """Results are cached: after a partial first pass, further full
        iterations replay every row without issuing another query."""
        User.create_users(10)
        with self.assertQueryCount(1):
            sq = User.select()
            qr = sq.execute()
            # Consume only the first five rows.
            first_five = []
            for i, u in enumerate(qr):
                first_five.append(u.username)
                if i == 4:
                    break
            self.assertEqual(first_five, ['u1', 'u2', 'u3', 'u4', 'u5'])
            # Subsequent iterations serve all rows from the result cache.
            another_iter = [u.username for u in qr]
            self.assertEqual(another_iter, ['u%d' % i for i in range(1, 11)])
            another_iter = [u.username for u in qr]
            self.assertEqual(another_iter, ['u%d' % i for i in range(1, 11)])
    def test_count(self):
        """``qr.count`` costs one query and is memoized per wrapper."""
        User.create_users(5)
        with self.assertQueryCount(1):
            query = User.select()
            qr = query.execute()
            self.assertEqual(qr.count, 5)
            # Calling again does not incur another query.
            self.assertEqual(qr.count, 5)
        with self.assertQueryCount(1):
            query = query.where(User.username != 'u1')
            qr = query.execute()
            self.assertEqual(qr.count, 4)
            # Calling again does not incur another query.
            self.assertEqual(qr.count, 4)
    # TODO: Fix this.
    @skip_test_if(lambda: True)
    def test_nested_iteration(self):
        """Nested iteration over one query should yield all rows in both
        loops — currently broken, hence unconditionally skipped."""
        User.create_users(4)
        with self.assertQueryCount(1):
            sq = User.select()
            outer = []
            inner = []
            for i_user in sq:
                outer.append(i_user.username)
                for o_user in sq:
                    inner.append(o_user.username)
            self.assertEqual(outer, ['u1', 'u2', 'u3', 'u4'])
            self.assertEqual(inner, ['u1', 'u2', 'u3', 'u4'])
def test_iteration_protocol(self):
User.create_users(3)
with self.assertQueryCount(1):
query = User.select().order_by(User.id)
qr = query.execute()
for user in qr:
pass
self.assertRaises(StopIteration, next, qr)
self.assertEqual([u.username for u in qr], ['u1', 'u2', 'u3'])
self.assertEqual(query[0].username, 'u1')
self.assertEqual(query[2].username, 'u3')
self.assertRaises(StopIteration, next, qr)
def test_iterator(self):
User.create_users(10)
with self.assertQueryCount(1):
qr = User.select().order_by(User.id).execute()
usernames = [u.username for u in qr.iterator()]
self.assertEqual(usernames, ['u%d' % i for i in range(1, 11)])
self.assertTrue(qr._populated)
self.assertEqual(qr._result_cache, [])
with self.assertQueryCount(0):
again = [u.username for u in qr]
self.assertEqual(again, [])
with self.assertQueryCount(1):
qr = User.select().where(User.username == 'xxx').execute()
usernames = [u.username for u in qr.iterator()]
self.assertEqual(usernames, [])
def test_iterator_query_method(self):
User.create_users(10)
with self.assertQueryCount(1):
qr = User.select().order_by(User.id)
usernames = [u.username for u in qr.iterator()]
self.assertEqual(usernames, ['u%d' % i for i in range(1, 11)])
with self.assertQueryCount(0):
again = [u.username for u in qr]
self.assertEqual(again, [])
def test_iterator_extended(self):
User.create_users(10)
for i in range(1, 4):
for j in range(i):
Blog.create(
title='blog-%s-%s' % (i, j),
user=User.get(User.username == 'u%s' % i))
qr = (User
.select(
User.username,
fn.Count(Blog.pk).alias('ct'))
.join(Blog)
.where(User.username << ['u1', 'u2', 'u3'])
.group_by(User)
.order_by(User.id)
.naive())
accum = []
with self.assertQueryCount(1):
for user in qr.iterator():
accum.append((user.username, user.ct))
self.assertEqual(accum, [
('u1', 1),
('u2', 2),
('u3', 3)])
qr = (User
.select(fn.Count(User.id).alias('ct'))
.group_by(User.username << ['u1', 'u2', 'u3'])
.order_by(fn.Count(User.id).desc()))
accum = []
with self.assertQueryCount(1):
for ct, in qr.tuples().iterator():
accum.append(ct)
self.assertEqual(accum, [7, 3])
def test_fill_cache(self):
def assertUsernames(qr, n):
self.assertEqual([u.username for u in qr._result_cache], ['u%d' % i for i in range(1, n+1)])
User.create_users(20)
with self.assertQueryCount(1):
qr = User.select().execute()
qr.fill_cache(5)
self.assertFalse(qr._populated)
assertUsernames(qr, 5)
# a subsequent call will not "over-fill"
qr.fill_cache(5)
self.assertFalse(qr._populated)
assertUsernames(qr, 5)
# ask for one more and ye shall receive
qr.fill_cache(6)
self.assertFalse(qr._populated)
assertUsernames(qr, 6)
qr.fill_cache(21)
self.assertTrue(qr._populated)
assertUsernames(qr, 20)
self.assertRaises(StopIteration, next, qr)
def test_select_related(self):
u1 = User.create(username='u1')
u2 = User.create(username='u2')
b1 = Blog.create(user=u1, title='b1')
b2 = Blog.create(user=u2, title='b2')
c11 = Comment.create(blog=b1, comment='c11')
c12 = Comment.create(blog=b1, comment='c12')
c21 = Comment.create(blog=b2, comment='c21')
c22 = Comment.create(blog=b2, comment='c22')
# missing comment.blog_id
comments = (Comment
.select(Comment.id, Comment.comment, Blog.pk, Blog.title)
.join(Blog)
.where(Blog.title == 'b1')
.order_by(Comment.id))
with self.assertQueryCount(1):
self.assertEqual([c.blog.title for c in comments], ['b1', 'b1'])
# missing blog.pk
comments = (Comment
.select(Comment.id, Comment.comment, Comment.blog, Blog.title)
.join(Blog)
.where(Blog.title == 'b2')
.order_by(Comment.id))
with self.assertQueryCount(1):
self.assertEqual([c.blog.title for c in comments], ['b2', 'b2'])
# both but going up 2 levels
comments = (Comment
.select(Comment, Blog, User)
.join(Blog)
.join(User)
.where(User.username == 'u1')
.order_by(Comment.id))
with self.assertQueryCount(1):
self.assertEqual([c.comment for c in comments], ['c11', 'c12'])
self.assertEqual([c.blog.title for c in comments], ['b1', 'b1'])
self.assertEqual([c.blog.user.username for c in comments], ['u1', 'u1'])
self.assertTrue(isinstance(comments._qr, ModelQueryResultWrapper))
comments = (Comment
.select()
.join(Blog)
.join(User)
.where(User.username == 'u1')
.order_by(Comment.id))
with self.assertQueryCount(5):
self.assertEqual([c.blog.user.username for c in comments], ['u1', 'u1'])
self.assertTrue(isinstance(comments._qr, NaiveQueryResultWrapper))
# Go up two levels and use aliases for the joined instances.
comments = (Comment
.select(Comment, Blog, User)
.join(Blog, on=(Comment.blog == Blog.pk).alias('bx'))
.join(User, on=(Blog.user == User.id).alias('ux'))
.where(User.username == 'u1')
.order_by(Comment.id))
with self.assertQueryCount(1):
self.assertEqual([c.comment for c in comments], ['c11', 'c12'])
self.assertEqual([c.bx.title for c in comments], ['b1', 'b1'])
self.assertEqual([c.bx.ux.username for c in comments], ['u1', 'u1'])
def test_naive(self):
u1 = User.create(username='u1')
u2 = User.create(username='u2')
b1 = Blog.create(user=u1, title='b1')
b2 = Blog.create(user=u2, title='b2')
users = User.select().naive()
self.assertEqual([u.username for u in users], ['u1', 'u2'])
self.assertTrue(isinstance(users._qr, NaiveQueryResultWrapper))
users = User.select(User, Blog).join(Blog).naive()
self.assertEqual([u.username for u in users], ['u1', 'u2'])
self.assertEqual([u.title for u in users], ['b1', 'b2'])
query = Blog.select(Blog, User).join(User).order_by(Blog.title).naive()
self.assertEqual(query.get().user, User.get(User.username == 'u1'))
def test_tuples_dicts(self):
u1 = User.create(username='u1')
u2 = User.create(username='u2')
b1 = Blog.create(user=u1, title='b1')
b2 = Blog.create(user=u2, title='b2')
users = User.select().tuples().order_by(User.id)
self.assertEqual([r for r in users], [
(u1.id, 'u1'),
(u2.id, 'u2'),
])
users = User.select().dicts()
self.assertEqual([r for r in users], [
{'id': u1.id, 'username': 'u1'},
{'id': u2.id, 'username': 'u2'},
])
users = User.select(User, Blog).join(Blog).order_by(User.id).tuples()
self.assertEqual([r for r in users], [
(u1.id, 'u1', b1.pk, u1.id, 'b1', '', None),
(u2.id, 'u2', b2.pk, u2.id, 'b2', '', None),
])
users = User.select(User, Blog).join(Blog).order_by(User.id).dicts()
self.assertEqual([r for r in users], [
{'id': u1.id, 'username': 'u1', 'pk': b1.pk, 'user': u1.id, 'title': 'b1', 'content': '', 'pub_date': None},
{'id': u2.id, 'username': 'u2', 'pk': b2.pk, 'user': u2.id, 'title': 'b2', 'content': '', 'pub_date': None},
])
def test_slicing_dicing(self):
def assertUsernames(users, nums):
self.assertEqual([u.username for u in users], ['u%d' % i for i in nums])
User.create_users(10)
with self.assertQueryCount(1):
uq = User.select().order_by(User.id)
for i in range(2):
res = uq[0]
self.assertEqual(res.username, 'u1')
with self.assertQueryCount(0):
for i in range(2):
res = uq[1]
self.assertEqual(res.username, 'u2')
with self.assertQueryCount(0):
for i in range(2):
res = uq[:3]
assertUsernames(res, [1, 2, 3])
with self.assertQueryCount(0):
for i in range(2):
res = uq[2:5]
assertUsernames(res, [3, 4, 5])
with self.assertQueryCount(0):
for i in range(2):
res = uq[5:]
assertUsernames(res, [6, 7, 8, 9, 10])
self.assertRaises(IndexError, uq.__getitem__, 10)
self.assertRaises(ValueError, uq.__getitem__, -1)
with self.assertQueryCount(0):
res = uq[10:]
self.assertEqual(res, [])
def test_indexing_fill_cache(self):
def assertUser(query_or_qr, idx):
self.assertEqual(query_or_qr[idx].username, 'u%d' % (idx + 1))
User.create_users(10)
uq = User.select().order_by(User.id)
with self.assertQueryCount(1):
# Ensure we can grab the first 5 users in 1 query.
for i in range(5):
assertUser(uq, i)
# Iterate in reverse and ensure only costs 1 query.
uq = User.select().order_by(User.id)
with self.assertQueryCount(1):
for i in reversed(range(10)):
assertUser(uq, i)
# Execute the query and get reference to result wrapper.
query = User.select().order_by(User.id)
query.execute()
qr = query._qr
# Getting the first user will populate the result cache with 1 obj.
assertUser(query, 0)
self.assertEqual(len(qr._result_cache), 1)
# Getting the last user will fill the cache.
assertUser(query, 9)
self.assertEqual(len(qr._result_cache), 10)
def test_prepared(self):
for i in range(2):
u = User.create(username='u%d' % i)
for j in range(2):
Blog.create(title='b%d-%d' % (i, j), user=u, content='')
for u in User.select():
# check prepared was called
self.assertEqual(u.foo, u.username)
for b in Blog.select(Blog, User).join(User):
# prepared is called for select-related instances
self.assertEqual(b.foo, b.title)
self.assertEqual(b.user.foo, b.user.username)
def test_aliasing_values(self):
User.create_users(2)
q = User.select(User.username.alias('xx')).order_by(User.username)
results = [row for row in q.dicts()]
self.assertEqual(results, [
{'xx': 'u1'},
{'xx': 'u2'}])
results = [user.xx for user in q]
self.assertEqual(results, ['u1', 'u2'])
# Force ModelQueryResultWrapper.
q = (User
.select(User.username.alias('xx'), Blog.pk)
.join(Blog, JOIN.LEFT_OUTER)
.order_by(User.username))
results = [user.xx for user in q]
self.assertEqual(results, ['u1', 'u2'])
# Use Model and Field aliases.
UA = User.alias()
q = (User
.select(
User.username.alias('x'),
UA.username.alias('y'))
.join(UA, on=(User.id == UA.id).alias('z'))
.order_by(User.username))
results = [(user.x, user.z.y) for user in q]
self.assertEqual(results, [('u1', 'u1'), ('u2', 'u2')])
q = q.naive()
results = [(user.x, user.y) for user in q]
self.assertEqual(results, [('u1', 'u1'), ('u2', 'u2')])
uq = User.select(User.id, User.username).alias('u2')
q = (User
.select(
User.username.alias('x'),
uq.c.username.alias('y'))
.join(uq, on=(User.id == uq.c.id))
.order_by(User.username))
results = [(user.x, user.y) for user in q]
self.assertEqual(results, [('u1', 'u1'), ('u2', 'u2')])
class TestJoinedInstanceConstruction(ModelTestCase):
    """Verifies which attributes get populated on joined instances
    depending on the columns selected and the join expression used."""
    requires = [Blog, User]

    def setUp(self):
        super(TestJoinedInstanceConstruction, self).setUp()
        u1 = User.create(username='u1')
        u2 = User.create(username='u2')
        Blog.create(user=u1, title='b1')
        Blog.create(user=u2, title='b2')

    def test_fk_missing_pk(self):
        """Omitting the joined PK leaves the FK/related id unset."""
        # Not enough information.
        with self.assertQueryCount(1):
            q = (Blog
                 .select(Blog.title, User.username)
                 .join(User)
                 .order_by(Blog.title, User.username))
            results = []
            for blog in q:
                results.append((blog.title, blog.user.username))
                self.assertIsNone(blog.user.id)
                self.assertIsNone(blog.user_id)
            self.assertEqual(results, [('b1', 'u1'), ('b2', 'u2')])

    def test_fk_with_pk(self):
        """Selecting the joined PK populates both the instance and FK id."""
        with self.assertQueryCount(1):
            q = (Blog
                 .select(Blog.title, User.username, User.id)
                 .join(User)
                 .order_by(Blog.title, User.username))
            results = []
            for blog in q:
                results.append((blog.title, blog.user.username))
                self.assertIsNotNone(blog.user.id)
                self.assertIsNotNone(blog.user_id)
            self.assertEqual(results, [('b1', 'u1'), ('b2', 'u2')])

    def test_backref_missing_pk(self):
        """Back-reference joins without PKs still attach the related row."""
        with self.assertQueryCount(1):
            q = (User
                 .select(User.username, Blog.title)
                 .join(Blog)
                 .order_by(User.username, Blog.title))
            results = []
            for user in q:
                results.append((user.username, user.blog.title))
                self.assertIsNone(user.id)
                self.assertIsNone(user.blog.pk)
                self.assertIsNone(user.blog.user_id)
            self.assertEqual(results, [('u1', 'b1'), ('u2', 'b2')])

    def test_fk_join_expr(self):
        """Aliased join expressions expose the joined row under the alias."""
        with self.assertQueryCount(1):
            q = (User
                 .select(User.username, Blog.title)
                 .join(Blog, on=(User.id == Blog.user).alias('bx'))
                 .order_by(User.username))
            results = []
            for user in q:
                results.append((user.username, user.bx.title))
            self.assertEqual(results, [('u1', 'b1'), ('u2', 'b2')])

        with self.assertQueryCount(1):
            q = (Blog
                 .select(Blog.title, User.username)
                 .join(User, on=(Blog.user == User.id).alias('ux'))
                 .order_by(Blog.title))
            results = []
            for blog in q:
                results.append((blog.title, blog.ux.username))
            self.assertEqual(results, [('b1', 'u1'), ('b2', 'u2')])

    def test_aliases(self):
        """Model aliases resolve to the default related attribute names."""
        B = Blog.alias()
        U = User.alias()
        with self.assertQueryCount(1):
            q = (U.select(U.username, B.title)
                 .join(B, on=(U.id == B.user))
                 .order_by(U.username))
            results = []
            for user in q:
                results.append((user.username, user.blog.title))
            self.assertEqual(results, [('u1', 'b1'), ('u2', 'b2')])

        with self.assertQueryCount(1):
            q = (B.select(B.title, U.username)
                 .join(U, on=(B.user == U.id))
                 .order_by(B.title))
            results = []
            for blog in q:
                results.append((blog.title, blog.user.username))
            self.assertEqual(results, [('b1', 'u1'), ('b2', 'u2')])

    def test_subqueries(self):
        """Subquery columns can be bound back onto a model via bind_to."""
        uq = User.select()
        bq = Blog.select(Blog.title, Blog.user).alias('bq')
        with self.assertQueryCount(1):
            q = (User
                 .select(User, bq.c.title.bind_to(Blog))
                 .join(bq, on=(User.id == bq.c.user_id).alias('blog'))
                 .order_by(User.username))
            results = []
            for user in q:
                results.append((user.username, user.blog.title))
            self.assertEqual(results, [('u1', 'b1'), ('u2', 'b2')])
class TestQueryResultTypeConversion(ModelTestCase):
    """Checks when field-level coercion (UpperUser's upper-casing field)
    is applied to selected values, aliases, functions and scalars."""
    requires = [User]

    def setUp(self):
        super(TestQueryResultTypeConversion, self).setUp()
        for i in range(3):
            User.create(username='u%d' % i)

    def assertNames(self, query, expected, attr='username'):
        """Assert the given attribute of each row, ordered by id."""
        id_field = query.model_class.id
        self.assertEqual(
            [getattr(item, attr) for item in query.order_by(id_field)],
            expected)

    def test_simple_select(self):
        """Coercion applies for UpperUser but not the plain User model."""
        query = UpperUser.select()
        self.assertNames(query, ['U0', 'U1', 'U2'])

        query = User.select()
        self.assertNames(query, ['u0', 'u1', 'u2'])

    def test_with_alias(self):
        # Even when aliased to a different attr, the column is coerced.
        query = UpperUser.select(UpperUser.username.alias('foo'))
        self.assertNames(query, ['U0', 'U1', 'U2'], 'foo')

    def test_scalar(self):
        """scalar() only coerces when convert=True is passed."""
        max_username = (UpperUser
                        .select(fn.Max(UpperUser.username))
                        .scalar(convert=True))
        self.assertEqual(max_username, 'U2')

        max_username = (UpperUser
                        .select(fn.Max(UpperUser.username))
                        .scalar())
        self.assertEqual(max_username, 'u2')

    def test_function(self):
        """Function results coerce based on aliasing and coerce(False)."""
        substr = fn.SubStr(UpperUser.username, 1, 3)

        # Being the first parameter of the function, it meets the special-case
        # criteria.
        query = UpperUser.select(substr.alias('foo'))
        self.assertNames(query, ['U0', 'U1', 'U2'], 'foo')

        query = UpperUser.select(substr.coerce(False).alias('foo'))
        self.assertNames(query, ['u0', 'u1', 'u2'], 'foo')

        query = UpperUser.select(substr.coerce(False).alias('username'))
        self.assertNames(query, ['u0', 'u1', 'u2'])

        query = UpperUser.select(fn.Lower(UpperUser.username).alias('username'))
        self.assertNames(query, ['U0', 'U1', 'U2'])

        query = UpperUser.select(
            fn.Lower(UpperUser.username).alias('username').coerce(False))
        self.assertNames(query, ['u0', 'u1', 'u2'])

        # Since it is aliased to an existing column, we will use that column's
        # coerce.
        query = UpperUser.select(
            fn.SubStr(fn.Lower(UpperUser.username), 1, 3).alias('username'))
        self.assertNames(query, ['U0', 'U1', 'U2'])

        query = UpperUser.select(
            fn.SubStr(fn.Lower(UpperUser.username), 1, 3).alias('foo'))
        self.assertNames(query, ['u0', 'u1', 'u2'], 'foo')
class TestModelQueryResultWrapper(ModelTestCase):
    """Tests reconstruction of joined-model graphs by the model query
    result wrapper, including aliased join expressions and backrefs."""
    requires = [TestModelA, TestModelB, TestModelC, User, Blog]

    # (model class, ((field value, data value), ...)) fixture rows.
    data = (
        (TestModelA, (
            ('pk1', 'a1'),
            ('pk2', 'a2'),
            ('pk3', 'a3'))),
        (TestModelB, (
            ('pk1', 'b1'),
            ('pk2', 'b2'),
            ('pk3', 'b3'))),
        (TestModelC, (
            ('pk1', 'c1'),
            ('pk2', 'c2'))),
    )

    def setUp(self):
        super(TestModelQueryResultWrapper, self).setUp()
        for model_class, model_data in self.data:
            for pk, data in model_data:
                model_class.create(field=pk, data=data)

    def test_join_expr(self):
        """Aliased multi-join graphs resolve through INNER and LEFT OUTER."""
        def get_query(join_type=JOIN.INNER):
            # A -> B (aliased 'rel_b') -> C, ordered by A.field.
            sq = (TestModelA
                  .select(TestModelA, TestModelB, TestModelC)
                  .join(
                      TestModelB,
                      on=(TestModelA.field == TestModelB.field).alias('rel_b'))
                  .join(
                      TestModelC,
                      join_type=join_type,
                      on=(TestModelB.field == TestModelC.field))
                  .order_by(TestModelA.field))
            return sq

        sq = get_query()
        self.assertEqual(sq.count(), 2)

        with self.assertQueryCount(1):
            results = list(sq)
            expected = (('b1', 'c1'), ('b2', 'c2'))
            for i, (b_data, c_data) in enumerate(expected):
                self.assertEqual(results[i].rel_b.data, b_data)
                self.assertEqual(results[i].rel_b.field.data, c_data)

        sq = get_query(JOIN.LEFT_OUTER)
        self.assertEqual(sq.count(), 3)

        with self.assertQueryCount(1):
            results = list(sq)
            expected = (('b1', 'c1'), ('b2', 'c2'), ('b3', None))
            for i, (b_data, c_data) in enumerate(expected):
                self.assertEqual(results[i].rel_b.data, b_data)
                self.assertEqual(results[i].rel_b.field.data, c_data)

    def test_backward_join(self):
        """Backref joins yield one parent row per joined child row."""
        u1 = User.create(username='u1')
        u2 = User.create(username='u2')
        for user in (u1, u2):
            Blog.create(title='b-%s' % user.username, user=user)

        # Create an additional blog for user 2.
        Blog.create(title='b-u2-2', user=u2)

        res = (User
               .select(User.username, Blog.title)
               .join(Blog)
               .order_by(User.username.asc(), Blog.title.asc()))
        self.assertEqual([(u.username, u.blog.title) for u in res], [
            ('u1', 'b-u1'),
            ('u2', 'b-u2'),
            ('u2', 'b-u2-2')])

    def test_joins_with_aliases(self):
        """Every combination of alias/predicate direction reconstructs
        the same (username, title) pairs."""
        u1 = User.create(username='u1')
        u2 = User.create(username='u2')
        b1_1 = Blog.create(user=u1, title='b1-1')
        b1_2 = Blog.create(user=u1, title='b1-2')
        b2_1 = Blog.create(user=u2, title='b2-1')

        UserAlias = User.alias()
        BlogAlias = Blog.alias()

        def assertExpectedQuery(query, is_user_query):
            accum = []

            with self.assertQueryCount(1):
                if is_user_query:
                    for user in query:
                        accum.append((user.username, user.blog.title))
                else:
                    for blog in query:
                        accum.append((blog.user.username, blog.title))

            self.assertEqual(accum, [
                ('u1', 'b1-1'),
                ('u1', 'b1-2'),
                ('u2', 'b2-1'),
            ])

        combinations = [
            (User, BlogAlias, User.id == BlogAlias.user, True),
            (User, BlogAlias, BlogAlias.user == User.id, True),
            (User, Blog, User.id == Blog.user, True),
            (User, Blog, Blog.user == User.id, True),
            (User, Blog, None, True),
            (Blog, UserAlias, UserAlias.id == Blog.user, False),
            (Blog, UserAlias, Blog.user == UserAlias.id, False),
            (Blog, User, User.id == Blog.user, False),
            (Blog, User, Blog.user == User.id, False),
            (Blog, User, None, False),
        ]
        for Src, JoinModel, predicate, is_user_query in combinations:
            query = (Src
                     .select(Src, JoinModel)
                     .join(JoinModel, on=predicate)
                     .order_by(SQL('1, 2')))
            assertExpectedQuery(query, is_user_query)
class TestModelQueryResultForeignKeys(ModelTestCase):
    """FK assignment works regardless of join-predicate direction."""
    requires = [Parent, Child]

    def test_foreign_key_assignment(self):
        parent = Parent.create(data='p1')
        child = Child.create(parent=parent, data='c1')
        ParentAlias = Parent.alias()
        query = Child.select(Child, ParentAlias)

        # Same join written in both directions.
        ljoin = (ParentAlias.id == Child.parent)
        rjoin = (Child.parent == ParentAlias.id)

        lhs_alias = query.join(ParentAlias, on=ljoin)
        rhs_alias = query.join(ParentAlias, on=rjoin)

        self.assertJoins(lhs_alias, [
            'INNER JOIN "parent" AS parent '
            'ON ("parent"."id" = "child"."parent_id")'])

        self.assertJoins(rhs_alias, [
            'INNER JOIN "parent" AS parent '
            'ON ("child"."parent_id" = "parent"."id")'])

        with self.assertQueryCount(1):
            lchild = lhs_alias.get()
            self.assertEqual(lchild.id, child.id)
            self.assertEqual(lchild.parent.id, parent.id)

        with self.assertQueryCount(1):
            rchild = rhs_alias.get()
            self.assertEqual(rchild.id, child.id)
            self.assertEqual(rchild.parent.id, parent.id)
class TestSelectRelatedForeignKeyToNonPrimaryKey(ModelTestCase):
    """Select-related works when the FK targets a non-primary-key field
    (PackageItem.package references Package.barcode)."""
    requires = [Package, PackageItem]

    def test_select_related(self):
        p1 = Package.create(barcode='101')
        p2 = Package.create(barcode='102')
        pi11 = PackageItem.create(title='p11', package='101')
        pi12 = PackageItem.create(title='p12', package='101')
        pi21 = PackageItem.create(title='p21', package='102')
        pi22 = PackageItem.create(title='p22', package='102')

        # missing PackageItem.package_id.
        with self.assertQueryCount(1):
            items = (PackageItem
                     .select(
                         PackageItem.id, PackageItem.title, Package.barcode)
                     .join(Package)
                     .where(Package.barcode == '101')
                     .order_by(PackageItem.id))
            self.assertEqual(
                [i.package.barcode for i in items],
                ['101', '101'])

        with self.assertQueryCount(1):
            items = (PackageItem
                     .select(
                         PackageItem.id, PackageItem.title, PackageItem.package, Package.id)
                     .join(Package)
                     .where(Package.barcode == '101')
                     .order_by(PackageItem.id))
            self.assertEqual([i.package.id for i in items], [p1.id, p1.id])
class BaseTestPrefetch(ModelTestCase):
    """Shared fixture data for prefetch/aggregate-rows test cases:
    users with blogs and comments, parents with children/orphans and
    their pets, plus a two-level category tree."""
    requires = [
        User,
        Blog,
        Comment,
        Parent,
        Child,
        Orphan,
        ChildPet,
        OrphanPet,
        Category,
        Post,
        Tag,
        TagPostThrough,
        TagPostThroughAlt,
        Category,
        UserCategory,
        Relationship,
    ]

    # (username, ((blog title, (comment, ...)), ...))
    user_data = [
        ('u1', (('b1', ('b1-c1', 'b1-c2')), ('b2', ('b2-c1',)))),
        ('u2', ()),
        ('u3', (('b3', ('b3-c1', 'b3-c2')), ('b4', ()))),
        ('u4', (('b5', ('b5-c1', 'b5-c2')), ('b6', ('b6-c1',)))),
    ]
    # (parent, (children-with-pets, orphans-with-pets))
    parent_data = [
        ('p1', (
            # children
            (
                ('c1', ('c1-p1', 'c1-p2')),
                ('c2', ('c2-p1',)),
                ('c3', ('c3-p1',)),
                ('c4', ()),
            ),
            # orphans
            (
                ('o1', ('o1-p1', 'o1-p2')),
                ('o2', ('o2-p1',)),
                ('o3', ('o3-p1',)),
                ('o4', ()),
            ),
        )),
        ('p2', ((), ())),
        ('p3', (
            # children
            (
                ('c6', ()),
                ('c7', ('c7-p1',)),
            ),
            # orphans
            (
                ('o6', ('o6-p1', 'o6-p2')),
                ('o7', ('o7-p1',)),
            ),
        )),
    ]

    # Expected [name, [child names]] pairs for _build_category_tree().
    category_tree = [
        ['root', ['p1', 'p2']],
        ['p1', ['p1-1', 'p1-2']],
        ['p2', ['p2-1', 'p2-2']],
        ['p1-1', []],
        ['p1-2', []],
        ['p2-1', []],
        ['p2-2', []],
    ]

    def setUp(self):
        """Materialize parent_data and user_data into database rows."""
        super(BaseTestPrefetch, self).setUp()
        for parent, (children, orphans) in self.parent_data:
            p = Parent.create(data=parent)
            for child_pets in children:
                child, pets = child_pets
                c = Child.create(parent=p, data=child)
                for pet in pets:
                    ChildPet.create(child=c, data=pet)
            for orphan_pets in orphans:
                orphan, pets = orphan_pets
                o = Orphan.create(parent=p, data=orphan)
                for pet in pets:
                    OrphanPet.create(orphan=o, data=pet)

        for user, blog_comments in self.user_data:
            u = User.create(username=user)
            for blog, comments in blog_comments:
                b = Blog.create(user=u, title=blog, content='')
                for c in comments:
                    Comment.create(blog=b, comment=c)

    def _build_category_tree(self):
        """Create the self-referential category rows for category_tree."""
        def cc(name, parent=None):
            return Category.create(name=name, parent=parent)

        root = cc('root')
        p1 = cc('p1', root)
        p2 = cc('p2', root)
        for p in (p1, p2):
            for i in range(2):
                cc('%s-%s' % (p.name, i + 1), p)
class TestPrefetch(BaseTestPrefetch):
    """Tests for prefetch(): one query per queried model, with related
    rows attached under the ``*_prefetch`` attributes."""

    def test_prefetch_simple(self):
        """Three-level prefetch costs exactly three queries."""
        sq = User.select().where(User.username != 'u3')
        sq2 = Blog.select().where(Blog.title != 'b2')
        sq3 = Comment.select()

        with self.assertQueryCount(3):
            prefetch_sq = prefetch(sq, sq2, sq3)
            results = []
            for user in prefetch_sq:
                results.append(user.username)
                for blog in user.blog_set_prefetch:
                    results.append(blog.title)
                    for comment in blog.comments_prefetch:
                        results.append(comment.comment)

            self.assertEqual(results, [
                'u1', 'b1', 'b1-c1', 'b1-c2',
                'u2',
                'u4', 'b5', 'b5-c1', 'b5-c2', 'b6', 'b6-c1',
            ])

        # Traversing back up (blog.user, comment.blog) is free.
        with self.assertQueryCount(0):
            results = []
            for user in prefetch_sq:
                for blog in user.blog_set_prefetch:
                    results.append(blog.user.username)
                    for comment in blog.comments_prefetch:
                        results.append(comment.blog.title)

            self.assertEqual(results, [
                'u1', 'b1', 'b1', 'u4', 'b5', 'b5', 'u4', 'b6',
            ])

    def test_prefetch_reverse(self):
        """Prefetching the parent from the child side costs two queries."""
        sq = User.select()
        sq2 = Blog.select().where(Blog.title != 'b2').order_by(Blog.pk)

        with self.assertQueryCount(2):
            prefetch_sq = prefetch(sq2, sq)
            results = []
            for blog in prefetch_sq:
                results.append(blog.title)
                results.append(blog.user.username)

        self.assertEqual(results, [
            'b1', 'u1',
            'b3', 'u3',
            'b4', 'u3',
            'b5', 'u4',
            'b6', 'u4'])

    def test_prefetch_up_and_down(self):
        """A joined select plus a prefetch combines both directions."""
        blogs = Blog.select(Blog, User).join(User).order_by(Blog.title)
        comments = Comment.select().order_by(Comment.comment.desc())

        with self.assertQueryCount(2):
            query = prefetch(blogs, comments)
            results = []
            for blog in query:
                results.append((
                    blog.user.username,
                    blog.title,
                    [comment.comment for comment in blog.comments_prefetch]))

            self.assertEqual(results, [
                ('u1', 'b1', ['b1-c2', 'b1-c1']),
                ('u1', 'b2', ['b2-c1']),
                ('u3', 'b3', ['b3-c2', 'b3-c1']),
                ('u3', 'b4', []),
                ('u4', 'b5', ['b5-c2', 'b5-c1']),
                ('u4', 'b6', ['b6-c1']),
            ])

    def test_prefetch_multi_depth(self):
        """Two independent child hierarchies prefetch in five queries."""
        sq = Parent.select()
        sq2 = Child.select()
        sq3 = Orphan.select()
        sq4 = ChildPet.select()
        sq5 = OrphanPet.select()

        with self.assertQueryCount(5):
            prefetch_sq = prefetch(sq, sq2, sq3, sq4, sq5)
            results = []
            for parent in prefetch_sq:
                results.append(parent.data)
                for child in parent.child_set_prefetch:
                    results.append(child.data)
                    for pet in child.childpet_set_prefetch:
                        results.append(pet.data)

                for orphan in parent.orphan_set_prefetch:
                    results.append(orphan.data)
                    for pet in orphan.orphanpet_set_prefetch:
                        results.append(pet.data)

            self.assertEqual(results, [
                'p1', 'c1', 'c1-p1', 'c1-p2', 'c2', 'c2-p1', 'c3', 'c3-p1', 'c4',
                'o1', 'o1-p1', 'o1-p2', 'o2', 'o2-p1', 'o3', 'o3-p1', 'o4',
                'p2',
                'p3', 'c6', 'c7', 'c7-p1', 'o6', 'o6-p1', 'o6-p2', 'o7', 'o7-p1',
            ])

    def test_prefetch_no_aggregate(self):
        """Plain joined select (no prefetch) repeats the parent per child."""
        with self.assertQueryCount(1):
            query = (User
                     .select(User, Blog)
                     .join(Blog, JOIN.LEFT_OUTER)
                     .order_by(User.username, Blog.title))
            results = []
            for user in query:
                results.append((
                    user.username,
                    user.blog.title))

            self.assertEqual(results, [
                ('u1', 'b1'),
                ('u1', 'b2'),
                ('u2', None),
                ('u3', 'b3'),
                ('u3', 'b4'),
                ('u4', 'b5'),
                ('u4', 'b6'),
            ])

    def test_prefetch_self_join(self):
        """Prefetch on a self-referential FK via a model alias."""
        self._build_category_tree()
        # NOTE: shadows the imported Child model within this method only.
        Child = Category.alias()
        with self.assertQueryCount(2):
            query = prefetch(Category.select().order_by(Category.id), Child)
            names_and_children = [
                [parent.name, [child.name for child in parent.children_prefetch]]
                for parent in query]

        self.assertEqual(names_and_children, self.category_tree)
class TestAggregateRows(BaseTestPrefetch):
    def test_aggregate_users(self):
        """User -> Blog -> Comment collapsed into nested lists, 1 query."""
        with self.assertQueryCount(1):
            query = (User
                     .select(User, Blog, Comment)
                     .join(Blog, JOIN.LEFT_OUTER)
                     .join(Comment, JOIN.LEFT_OUTER)
                     .order_by(User.username, Blog.title, Comment.id)
                     .aggregate_rows())

            results = []
            for user in query:
                results.append((
                    user.username,
                    [(blog.title,
                      [comment.comment for comment in blog.comments])
                     for blog in user.blog_set]))

        self.assertEqual(results, [
            ('u1', [
                ('b1', ['b1-c1', 'b1-c2']),
                ('b2', ['b2-c1'])]),
            ('u2', []),
            ('u3', [
                ('b3', ['b3-c1', 'b3-c2']),
                ('b4', [])]),
            ('u4', [
                ('b5', ['b5-c1', 'b5-c2']),
                ('b6', ['b6-c1'])]),
        ])
    def test_aggregate_blogs(self):
        """Blog-rooted aggregation: user joined up, comments grouped down."""
        with self.assertQueryCount(1):
            query = (Blog
                     .select(Blog, User, Comment)
                     .join(User)
                     .switch(Blog)
                     .join(Comment, JOIN.LEFT_OUTER)
                     .order_by(Blog.title, User.username, Comment.id)
                     .aggregate_rows())

            results = []
            for blog in query:
                results.append((
                    blog.user.username,
                    blog.title,
                    [comment.comment for comment in blog.comments]))

        self.assertEqual(results, [
            ('u1', 'b1', ['b1-c1', 'b1-c2']),
            ('u1', 'b2', ['b2-c1']),
            ('u3', 'b3', ['b3-c1', 'b3-c2']),
            ('u3', 'b4', []),
            ('u4', 'b5', ['b5-c1', 'b5-c2']),
            ('u4', 'b6', ['b6-c1']),
        ])
    def test_aggregate_on_expression_join(self):
        """aggregate_rows works with an explicit (non-FK) join expression."""
        with self.assertQueryCount(1):
            join_expr = (User.id == Blog.user)
            query = (User
                     .select(User, Blog)
                     .join(Blog, JOIN.LEFT_OUTER, on=join_expr)
                     .order_by(User.username, Blog.title)
                     .aggregate_rows())
            results = []
            for user in query:
                results.append((
                    user.username,
                    [blog.title for blog in user.blog_set]))

        self.assertEqual(results, [
            ('u1', ['b1', 'b2']),
            ('u2', []),
            ('u3', ['b3', 'b4']),
            ('u4', ['b5', 'b6']),
        ])
    def test_aggregate_with_join_model_aliases(self):
        """Aliased join expressions ('blogz') aggregate under the alias,
        with and without a model alias for the joined side."""
        expected = [
            ('u1', ['b1', 'b2']),
            ('u2', []),
            ('u3', ['b3', 'b4']),
            ('u4', ['b5', 'b6']),
        ]

        with self.assertQueryCount(1):
            query = (User
                     .select(User, Blog)
                     .join(
                         Blog,
                         JOIN.LEFT_OUTER,
                         on=(User.id == Blog.user).alias('blogz'))
                     .order_by(User.id, Blog.title)
                     .aggregate_rows())
            results = [
                (user.username, [blog.title for blog in user.blogz])
                for user in query]

        self.assertEqual(results, expected)

        BlogAlias = Blog.alias()
        with self.assertQueryCount(1):
            query = (User
                     .select(User, BlogAlias)
                     .join(
                         BlogAlias,
                         JOIN.LEFT_OUTER,
                         on=(User.id == BlogAlias.user).alias('blogz'))
                     .order_by(User.id, BlogAlias.title)
                     .aggregate_rows())
            results = [
                (user.username, [blog.title for blog in user.blogz])
                for user in query]

        self.assertEqual(results, expected)
    def test_aggregate_unselected_join_backref(self):
        """Filter-only backref joins do not disturb aggregation."""
        cat_1 = Category.create(name='category 1')
        cat_2 = Category.create(name='category 2')
        with test_db.transaction():
            for i, user in enumerate(User.select().order_by(User.username)):
                if i % 2 == 0:
                    category = cat_2
                else:
                    category = cat_1
                UserCategory.create(user=user, category=category)

        with self.assertQueryCount(1):
            # The join on UserCategory is a backref join (since the FK is on
            # UserCategory). Additionally, UserCategory/Category are not
            # selected and are only used for filtering the result set.
            query = (User
                     .select(User, Blog)
                     .join(Blog, JOIN.LEFT_OUTER)
                     .switch(User)
                     .join(UserCategory)
                     .join(Category)
                     .where(Category.name == cat_1.name)
                     .order_by(User.username, Blog.title)
                     .aggregate_rows())

            results = []
            for user in query:
                results.append((
                    user.username,
                    [blog.title for blog in user.blog_set]))

        self.assertEqual(results, [
            ('u2', []),
            ('u4', ['b5', 'b6']),
        ])
    def test_aggregate_manytomany(self):
        """Many-to-many through-model rows aggregate under the backref."""
        p1 = Post.create(title='p1')
        p2 = Post.create(title='p2')
        Post.create(title='p3')
        p4 = Post.create(title='p4')
        t1 = Tag.create(tag='t1')
        t2 = Tag.create(tag='t2')
        t3 = Tag.create(tag='t3')
        TagPostThroughAlt.create(tag=t1, post=p1)
        TagPostThroughAlt.create(tag=t2, post=p1)
        TagPostThroughAlt.create(tag=t2, post=p2)
        TagPostThroughAlt.create(tag=t3, post=p2)
        TagPostThroughAlt.create(tag=t1, post=p4)
        TagPostThroughAlt.create(tag=t2, post=p4)
        TagPostThroughAlt.create(tag=t3, post=p4)

        with self.assertQueryCount(1):
            query = (Post
                     .select(Post, TagPostThroughAlt, Tag)
                     .join(TagPostThroughAlt, JOIN.LEFT_OUTER)
                     .join(Tag, JOIN.LEFT_OUTER)
                     .order_by(Post.id, TagPostThroughAlt.post, Tag.id)
                     .aggregate_rows())
            results = []
            for post in query:
                post_data = [post.title]
                for tpt in post.tags_alt:
                    post_data.append(tpt.tag.tag)
                results.append(post_data)

        self.assertEqual(results, [
            ['p1', 't1', 't2'],
            ['p2', 't2', 't3'],
            ['p3'],
            ['p4', 't1', 't2', 't3'],
        ])
    def test_aggregate_parent_child(self):
        """Two sibling hierarchies aggregate from one cartesian result."""
        with self.assertQueryCount(1):
            query = (Parent
                     .select(Parent, Child, Orphan, ChildPet, OrphanPet)
                     .join(Child, JOIN.LEFT_OUTER)
                     .join(ChildPet, JOIN.LEFT_OUTER)
                     .switch(Parent)
                     .join(Orphan, JOIN.LEFT_OUTER)
                     .join(OrphanPet, JOIN.LEFT_OUTER)
                     .order_by(
                         Parent.data,
                         Child.data,
                         ChildPet.id,
                         Orphan.data,
                         OrphanPet.id)
                     .aggregate_rows())

            results = []
            for parent in query:
                results.append((
                    parent.data,
                    [(child.data, [pet.data for pet in child.childpet_set])
                     for child in parent.child_set],
                    [(orphan.data, [pet.data for pet in orphan.orphanpet_set])
                     for orphan in parent.orphan_set]
                ))

        # Without the `.aggregate_rows()` call, this would be 289!!
        self.assertEqual(results, [
            ('p1',
             [('c1', ['c1-p1', 'c1-p2']),
              ('c2', ['c2-p1']),
              ('c3', ['c3-p1']),
              ('c4', [])],
             [('o1', ['o1-p1', 'o1-p2']),
              ('o2', ['o2-p1']),
              ('o3', ['o3-p1']),
              ('o4', [])],
            ),
            ('p2', [], []),
            ('p3',
             [('c6', []),
              ('c7', ['c7-p1'])],
             [('o6', ['o6-p1', 'o6-p2']),
              ('o7', ['o7-p1'])],)
        ])
    def test_aggregate_with_unselected_joins(self):
        """Joins used only in WHERE do not appear in aggregated results."""
        with self.assertQueryCount(1):
            query = (Child
                     .select(Child, ChildPet, Parent)
                     .join(ChildPet, JOIN.LEFT_OUTER)
                     .switch(Child)
                     .join(Parent)
                     .join(Orphan)
                     .join(OrphanPet)
                     .where(OrphanPet.data == 'o6-p2')
                     .order_by(Child.data, ChildPet.data)
                     .aggregate_rows())
            results = []
            for child in query:
                results.append((
                    child.data,
                    child.parent.data,
                    [child_pet.data for child_pet in child.childpet_set]))

        self.assertEqual(results, [
            ('c6', 'p3', []),
            ('c7', 'p3', ['c7-p1']),
        ])

        with self.assertQueryCount(1):
            query = (Parent
                     .select(Parent, Child, ChildPet)
                     .join(Child, JOIN.LEFT_OUTER)
                     .join(ChildPet, JOIN.LEFT_OUTER)
                     .switch(Parent)
                     .join(Orphan)
                     .join(OrphanPet)
                     .where(OrphanPet.data == 'o6-p2')
                     .order_by(Parent.data, Child.data, ChildPet.data)
                     .aggregate_rows())
            results = []
            for parent in query:
                results.append((
                    parent.data,
                    [(child.data, [pet.data for pet in child.childpet_set])
                     for child in parent.child_set]))

        self.assertEqual(results, [('p3', [
            ('c6', []),
            ('c7', ['c7-p1']),
        ])])
def test_aggregate_rows_ordering(self):
# Refs github #519.
with self.assertQueryCount(1):
query = (User
.select(User, Blog)
.join(Blog, JOIN.LEFT_OUTER)
.order_by(User.username.desc(), Blog.title.desc())
.aggregate_rows())
accum = []
for user in query:
accum.append((
user.username,
[blog.title for blog in user.blog_set]))
if sys.version_info[:2] > (2, 6):
self.assertEqual(accum, [
('u4', ['b6', 'b5']),
('u3', ['b4', 'b3']),
('u2', []),
('u1', ['b2', 'b1']),
])
def test_aggregate_rows_self_join(self):
    # Self-referential aggregation: Category joined to an alias of itself,
    # with an explicit alias ('childrenx') on the join expression.
    self._build_category_tree()
    Child = Category.alias()

    # Same query, but this time use an `alias` on the join expr.
    with self.assertQueryCount(1):
        query = (Category
                 .select(Category, Child)
                 .join(
                     Child,
                     JOIN.LEFT_OUTER,
                     on=(Category.id == Child.parent).alias('childrenx'))
                 .order_by(Category.id, Child.id)
                 .aggregate_rows())
        names_and_children = [
            [parent.name, [child.name for child in parent.childrenx]]
            for parent in query]
        self.assertEqual(names_and_children, self.category_tree)
def test_multiple_fks(self):
    # Relationship carries two FKs to User (from_user / to_user); exercise
    # aggregate_rows() when the same model is joined more than once via
    # aliases.
    names = ['charlie', 'huey', 'zaizee']
    charlie, huey, zaizee = [
        User.create(username=username) for username in names]
    Relationship.create(from_user=charlie, to_user=huey)
    Relationship.create(from_user=charlie, to_user=zaizee)
    Relationship.create(from_user=huey, to_user=charlie)
    Relationship.create(from_user=zaizee, to_user=charlie)

    UserAlias = User.alias()
    with self.assertQueryCount(1):
        query = (User
                 .select(User, Relationship, UserAlias)
                 .join(
                     Relationship,
                     JOIN.LEFT_OUTER,
                     on=Relationship.from_user)
                 .join(
                     UserAlias,
                     on=(
                         Relationship.to_user == UserAlias.id
                     ).alias('to_user'))
                 .order_by(User.username, Relationship.id)
                 .where(User.username == 'charlie')
                 .aggregate_rows())
        results = [row for row in query]
        self.assertEqual(len(results), 1)

        # Attribute access below must not issue further queries.
        user = results[0]
        self.assertEqual(user.username, 'charlie')
        self.assertEqual(len(user.relationships), 2)
        rh, rz = user.relationships
        self.assertEqual(rh.to_user.username, 'huey')
        self.assertEqual(rz.to_user.username, 'zaizee')

    # Both FKs aliased at once, each join expression carrying its own alias.
    FromUser = User.alias()
    ToUser = User.alias()
    from_join = (Relationship.from_user == FromUser.id)
    to_join = (Relationship.to_user == ToUser.id)
    with self.assertQueryCount(1):
        query = (Relationship
                 .select(Relationship, FromUser, ToUser)
                 .join(FromUser, on=from_join.alias('from_user'))
                 .switch(Relationship)
                 .join(ToUser, on=to_join.alias('to_user'))
                 .order_by(Relationship.id)
                 .aggregate_rows())
        results = [
            (relationship.from_user.username,
             relationship.to_user.username)
            for relationship in query]
        self.assertEqual(results, [
            ('charlie', 'huey'),
            ('charlie', 'zaizee'),
            ('huey', 'charlie'),
            ('zaizee', 'charlie'),
        ])
def test_multiple_fks_multi_depth(self):
    # Like test_multiple_fks, but each aliased User join continues through
    # aliased UserCategory and Category joins, so the same models appear
    # multiple times at multiple depths in one aggregated query.
    names = ['charlie', 'huey', 'zaizee']
    charlie, huey, zaizee = [
        User.create(username=username) for username in names]
    Relationship.create(from_user=charlie, to_user=huey)
    Relationship.create(from_user=charlie, to_user=zaizee)
    Relationship.create(from_user=huey, to_user=charlie)
    Relationship.create(from_user=zaizee, to_user=charlie)

    human = Category.create(name='human')
    kitty = Category.create(name='kitty')
    UserCategory.create(user=charlie, category=human)
    UserCategory.create(user=huey, category=kitty)
    UserCategory.create(user=zaizee, category=kitty)

    # One alias chain per FK side: User -> UserCategory -> Category.
    FromUser = User.alias()
    ToUser = User.alias()
    from_join = (Relationship.from_user == FromUser.id)
    to_join = (Relationship.to_user == ToUser.id)

    FromUserCategory = UserCategory.alias()
    ToUserCategory = UserCategory.alias()
    from_uc_join = (FromUser.id == FromUserCategory.user)
    to_uc_join = (ToUser.id == ToUserCategory.user)

    FromCategory = Category.alias()
    ToCategory = Category.alias()
    from_c_join = (FromUserCategory.category == FromCategory.id)
    to_c_join = (ToUserCategory.category == ToCategory.id)

    with self.assertQueryCount(1):
        query = (Relationship
                 .select(
                     Relationship,
                     FromUser,
                     ToUser,
                     FromUserCategory,
                     ToUserCategory,
                     FromCategory,
                     ToCategory)
                 .join(FromUser, on=from_join.alias('from_user'))
                 .join(FromUserCategory, on=from_uc_join.alias('fuc'))
                 .join(FromCategory, on=from_c_join.alias('category'))
                 .switch(Relationship)
                 .join(ToUser, on=to_join.alias('to_user'))
                 .join(ToUserCategory, on=to_uc_join.alias('tuc'))
                 .join(ToCategory, on=to_c_join.alias('category'))
                 .order_by(Relationship.id)
                 .aggregate_rows())

        results = []
        for obj in query:
            from_user = obj.from_user
            to_user = obj.to_user
            results.append((
                from_user.username,
                from_user.fuc[0].category.name,
                to_user.username,
                to_user.tuc[0].category.name))

        self.assertEqual(results, [
            ('charlie', 'human', 'huey', 'kitty'),
            ('charlie', 'human', 'zaizee', 'kitty'),
            ('huey', 'kitty', 'charlie', 'human'),
            ('zaizee', 'kitty', 'charlie', 'human'),
        ])
class TestAggregateRowsRegression(ModelTestCase):
    """Regression coverage for specific aggregate_rows() failure cases."""
    requires = [
        User,
        Blog,
        Comment,
        Category,
        CommentCategory,
        BlogData]

    def setUp(self):
        # One user/blog with two comments, each tagged with both categories.
        super(TestAggregateRowsRegression, self).setUp()
        u = User.create(username='u1')
        b = Blog.create(title='b1', user=u)
        BlogData.create(blog=b)

        c1 = Comment.create(blog=b, comment='c1')
        c2 = Comment.create(blog=b, comment='c2')

        cat1 = Category.create(name='cat1')
        cat2 = Category.create(name='cat2')
        CommentCategory.create(category=cat1, comment=c1, sort_order=1)
        CommentCategory.create(category=cat2, comment=c1, sort_order=1)
        CommentCategory.create(category=cat1, comment=c2, sort_order=2)
        CommentCategory.create(category=cat2, comment=c2, sort_order=2)

    def test_aggregate_rows_regression(self):
        comments = (Comment
                    .select(
                        Comment,
                        CommentCategory,
                        Category,
                        Blog,
                        BlogData)
                    .join(CommentCategory, JOIN.LEFT_OUTER)
                    .join(Category, JOIN.LEFT_OUTER)
                    .switch(Comment)
                    .join(Blog)
                    .join(BlogData, JOIN.LEFT_OUTER)
                    .where(Category.id == 1)
                    .order_by(CommentCategory.sort_order))

        with self.assertQueryCount(1):
            # Materializing the aggregate query over this mixed inner/outer
            # join chain must neither raise nor issue extra queries.
            c_list = list(comments.aggregate_rows())

    def test_regression_506(self):
        # Regression: aggregate_rows() when joining against a paginated
        # subquery alias (users.c.id).
        user = User.create(username='u2')
        for i in range(2):
            Blog.create(title='u2-%s' % i, user=user)

        users = (User
                 .select()
                 .order_by(User.id.desc())
                 .paginate(1, 5)
                 .alias('users'))

        with self.assertQueryCount(1):
            query = (User
                     .select(User, Blog)
                     .join(Blog)
                     .join(users, on=(User.id == users.c.id))
                     .order_by(User.username, Blog.title)
                     .aggregate_rows())
            results = []
            for user in query:
                results.append((
                    user.username,
                    [blog.title for blog in user.blog_set]))
            self.assertEqual(results, [
                ('u1', ['b1']),
                ('u2', ['u2-0', 'u2-1']),
            ])
class TestPrefetchNonPKFK(ModelTestCase):
    """prefetch() when PackageItem references Package by barcode value
    rather than by primary key."""
    requires = [Package, PackageItem]
    data = {
        '101': ['a', 'b'],
        '102': ['c'],
        '103': [],
        '104': ['a', 'b', 'c', 'd', 'e'],
    }

    def setUp(self):
        super(TestPrefetchNonPKFK, self).setUp()
        for barcode, titles in self.data.items():
            Package.create(barcode=barcode)
            for title in titles:
                PackageItem.create(package=barcode, title=title)

    def test_prefetch(self):
        packages = Package.select().order_by(Package.barcode)
        items = PackageItem.select().order_by(PackageItem.id)
        query = prefetch(packages, items)

        # Every package comes back with its items attached, in order.
        expected = sorted(self.data.items())
        for package, (barcode, titles) in zip(query, expected):
            self.assertEqual(package.barcode, barcode)
            self.assertEqual(
                [item.title for item in package.items_prefetch],
                titles)

        # Filters on both sides are respected by the prefetch.
        packages = (Package
                    .select()
                    .where(Package.barcode << ['101', '104'])
                    .order_by(Package.id))
        items = items.where(PackageItem.title << ['a', 'c', 'e'])
        query = prefetch(packages, items)
        accum = {
            package.barcode: [item.title for item in package.items_prefetch]
            for package in query}
        self.assertEqual(accum, {
            '101': ['a'],
            '104': ['a', 'c', 'e'],
        })
| |
# Copyright 2014-2016 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import shutil
import string
from tests.integrations.base import BaseTest, pull, gitfs_log # noqa
class TestWriteCurrentView(BaseTest):
    """Integration tests for write operations through the mounted view.

    Each mutation is wrapped in ``gitfs_log("SyncWorker: Set
    push_successful")``, which waits until the sync worker reports a
    successful push; the backing repository is then pulled and inspected
    for the expected paths and commit messages.
    """

    def test_delete_directory_with_space_within_name(self, gitfs_log):
        directory = "{}/new directory".format(self.current_path)
        with gitfs_log("SyncWorker: Set push_successful"):
            os.makedirs(directory)
        with pull(self.sh):
            # check if directory exists or not
            directory_path = "{}/new directory".format(self.repo_path)
            assert os.path.exists(directory_path)
            # check for .keep file
            keep_path = "{}/new directory/.keep".format(self.repo_path)
            assert os.path.exists(keep_path)
            self.assert_new_commit()
            self.assert_commit_message("Create the /new directory directory")
        with gitfs_log("SyncWorker: Set push_successful"):
            shutil.rmtree("{}/new directory/".format(self.current_path))
        with pull(self.sh):
            self.assert_new_commit()
        assert os.path.exists(directory) is False

    def test_delete_a_directory(self, gitfs_log):
        path = "{}/a_directory/another_dir/".format(self.current_path)
        with gitfs_log("SyncWorker: Set push_successful"):
            os.makedirs(path)
        with pull(self.sh):
            self.assert_new_commit()
        with gitfs_log("SyncWorker: Set push_successful"):
            shutil.rmtree("{}/a_directory/".format(self.current_path))
        with pull(self.sh):
            # removing the tree drops both the directory and its .keep file
            self.assert_commit_message("Update 2 items. Removed 2 items.")
            self.assert_new_commit()
        assert os.path.exists(path) is False

    def test_rename_directory(self, gitfs_log):
        old_dir = "{}/a_directory/".format(self.current_path)
        new_dir = "{}/some_directory/".format(self.current_path)
        with gitfs_log("SyncWorker: Set push_successful"):
            os.makedirs(old_dir)
        with pull(self.sh):
            self.assert_new_commit()
        with gitfs_log("SyncWorker: Set push_successful"):
            os.rename(old_dir, new_dir)
        with pull(self.sh):
            self.assert_new_commit()
        assert os.path.isdir(new_dir) is not False
        assert os.path.exists(old_dir) is False

    def test_link_a_file(self, gitfs_log):
        filename = "{}/link_file".format(self.current_path)
        link_name = "{}/new_link".format(self.current_path)
        with open(filename, "w") as f:
            f.write("some content")
        with gitfs_log("SyncWorker: Set push_successful"):
            os.link(filename, link_name)
        with pull(self.sh):
            # the source file and the hard link appear as two added items
            self.assert_commit_message("Update 2 items. Added 2 items.")
        is_link = os.path.isfile(link_name)
        assert is_link is not False

    def test_write_a_file(self, gitfs_log):
        content = "Just a small file"
        filename = "{}/new_file".format(self.current_path)
        with gitfs_log("SyncWorker: Set push_successful"):
            with open(filename, "w") as f:
                f.write(content)
        # check if the write was done correctly
        with open(filename) as f:
            assert f.read() == content
        # check if a commit was made
        with pull(self.sh):
            self.assert_new_commit()
            self.assert_commit_message("Update /new_file")

    def test_create_a_directory(self, gitfs_log):
        directory = "{}/new_directory".format(self.current_path)
        with gitfs_log("SyncWorker: Set push_successful"):
            os.makedirs(directory)
        with pull(self.sh):
            # check if directory exists or not
            directory_path = "{}/new_directory".format(self.repo_path)
            assert os.path.exists(directory_path)
            # check for .keep file
            keep_path = "{}/new_directory/.keep".format(self.repo_path)
            assert os.path.exists(keep_path)
            self.assert_new_commit()
            self.assert_commit_message("Create the /new_directory directory")

    def test_write_in_keep_file(self):
        # .keep placeholders are not writable through the mount.
        directory = "{}/new_directory".format(self.current_path)
        with pytest.raises(IOError):
            with open("{}/.keep".format(directory), "w") as f:
                f.write("some content")

    def test_create_embedded_directory(self, gitfs_log):
        directory = "{}/directory/embedded-directory".format(self.current_path)
        with gitfs_log("SyncWorker: Set push_successful"):
            os.makedirs(directory)
        with pull(self.sh):
            # check if directory exists or not
            directory_path = "{}/directory/embedded-directory".format(self.repo_path)
            assert os.path.exists(directory_path)
            # check the existence of the .keep files
            keep_files = [
                "{}/directory/.keep".format(self.repo_path),
                "{}/directory/embedded-directory/.keep".format(self.repo_path),
            ]
            for keep_file in keep_files:
                assert os.path.exists(keep_file)
            self.assert_new_commit()
            commit_msg = "Update 2 items. Added 2 items."
            self.assert_commit_message(commit_msg)

    def test_create_directory_inside_an_already_existing_directory(self, gitfs_log):
        directory = "{}/directory/new-embedded-directory".format(self.current_path)
        with gitfs_log("SyncWorker: Set push_successful"):
            os.makedirs(directory)
        with pull(self.sh):
            # check if directory exists or not
            directory_path = "{}/directory/new-embedded-directory".format(
                self.repo_path
            )
            assert os.path.exists(directory_path)
            # check the existence of the .keep files
            keep_files = [
                "{}/directory/.keep".format(self.repo_path),
                "{}/directory/new-embedded-directory/.keep".format(self.repo_path),
            ]
            for keep_file in keep_files:
                assert os.path.exists(keep_file)
            self.assert_new_commit()
            commit_msg = "Create the /directory/new-embedded-directory directory"
            self.assert_commit_message(commit_msg)

    def test_create_embedded_directory_on_multiple_levels(self, gitfs_log):
        directory = "{}/a/b/c".format(self.current_path)
        with gitfs_log("SyncWorker: Set push_successful"):
            os.makedirs(directory)
        with pull(self.sh):
            # check if directory exists or not
            directory_path = "{}/a/b/c".format(self.repo_path)
            assert os.path.exists(directory_path)
            # check the existence of the .keep files
            keep_files = [
                "{}/a/.keep".format(self.repo_path),
                "{}/a/b/.keep".format(self.repo_path),
                "{}/a/b/c/.keep".format(self.repo_path),
            ]
            for keep_file in keep_files:
                assert os.path.exists(keep_file)
            self.assert_new_commit()
            commit_msg = "Update {} items. Added {} items.".format(
                len(keep_files), len(keep_files)
            )
            self.assert_commit_message(commit_msg)

    def test_create_embedded_directory_big_depth(self, gitfs_log):
        # Build a 26-level-deep path a/b/.../z in one makedirs call.
        path = ""
        for letter in string.ascii_lowercase:
            path = os.path.join(path, letter)
        with gitfs_log("SyncWorker: Set push_successful"):
            os.makedirs(os.path.join(self.current_path, path))
        with pull(self.sh):
            # check if directory exists or not
            directory_path = os.path.join(self.repo_path, path)
            assert os.path.exists(directory_path)
            # build the paths for the keep files
            keep_files = []
            path = self.repo_path
            for letter in string.ascii_lowercase:
                path = os.path.join(path, letter)
                path_with_keep = os.path.join(path, ".keep")
                keep_files.append(path_with_keep)
            # check the existence of the .keep files
            for keep_file in keep_files:
                assert os.path.exists(keep_file)

    def test_chmod_valid_mode(self, gitfs_log):
        # NOTE(review): assumes a file named `testing` exists in the
        # fixture repository -- confirm against BaseTest setup.
        filename = "{}/testing".format(self.current_path)
        with gitfs_log("SyncWorker: Set push_successful"):
            os.chmod(filename, 0o755)
        with pull(self.sh):
            # check if the right mode was set
            stats = os.stat(filename)
            assert stats.st_mode == 0o100755
            self.assert_new_commit()
            self.assert_commit_message("Chmod to 0755 on /testing")

    def test_chmod_invalid_mode(self):
        # 0o777 is expected to be refused (OSError) through the mount.
        filename = "{}/testing".format(self.current_path)
        with pytest.raises(OSError):
            os.chmod(filename, 0o777)

    def test_rename(self, gitfs_log):
        old_filename = "{}/testing".format(self.current_path)
        new_filename = "{}/new_testing".format(self.current_path)
        with gitfs_log("SyncWorker: Set push_successful"):
            os.rename(old_filename, new_filename)
        with pull(self.sh):
            # check for new file
            assert os.path.exists(new_filename)
            self.assert_new_commit()
            self.assert_commit_message("Rename /testing to /new_testing")

    def test_fsync(self, gitfs_log):
        filename = "{}/me".format(self.current_path)
        content = "test fsync"
        with gitfs_log("SyncWorker: Set push_successful"):
            with open(filename, "w") as f:
                f.write(content)
                os.fsync(f.fileno())
        with pull(self.sh):
            self.assert_new_commit()
            self.assert_commit_message("Update 1 items. Added 2 items.")

    def test_create(self, gitfs_log):
        filename = "{}/new_empty_file".format(self.current_path)
        with gitfs_log("SyncWorker: Set push_successful"):
            open(filename, "a").close()
        with pull(self.sh):
            self.assert_new_commit()
            self.assert_commit_message("Created /new_empty_file")

    def test_symbolic_link(self, gitfs_log):
        target = "me"
        name = "{}/links".format(self.current_path)
        with gitfs_log("SyncWorker: Set push_successful"):
            os.symlink(target, name)
        with pull(self.sh):
            # check if link exists
            assert os.path.exists(name)
            self.assert_new_commit()
            self.assert_commit_message(
                "Create symlink to {} for " "/links".format(target)
            )

    def test_edit_file(self, gitfs_log):
        content = "first part"
        continuation = "second part"
        filename = "{}/some_file".format(self.current_path)
        with gitfs_log("SyncWorker: Set push_successful"):
            with open(filename, "w") as f:
                f.write(content)
        with pull(self.sh):
            with open(filename) as f:
                assert f.read() == content
            self.assert_new_commit()
        with pull(self.sh):
            self.assert_commit_message("Update /some_file")
        # overwrite the same file and expect a second, identical message
        with gitfs_log("SyncWorker: Set push_successful"):
            with open(filename, "w") as f:
                f.write(continuation)
        with pull(self.sh):
            with open(filename) as f:
                assert f.read() == continuation
            self.assert_new_commit()
        with pull(self.sh):
            self.assert_commit_message("Update /some_file")

    def test_create_multiple_files(self, gitfs_log):
        content = "Just a small file"
        no_of_files = 10
        filename = "{}/new_file".format(self.current_path)
        with gitfs_log("SyncWorker: Set push_successful"):
            for i in range(no_of_files):
                with open(filename + str(i), "w") as f:
                    f.write(content)
        with pull(self.sh):
            for i in range(no_of_files):
                with open(filename + str(i)) as f:
                    assert f.read() == content
            self.assert_new_commit()
        with pull(self.sh):
            self.assert_commit_message(
                "Update {} items. Added {} items.".format(no_of_files, no_of_files)
            )

    def test_delete_file(self, gitfs_log):
        filename = "{}/deletable_file".format(self.current_path)
        with gitfs_log("SyncWorker: Set push_successful"):
            with open(filename, "w") as f:
                f.write("some content")
        with pull(self.sh):
            self.assert_new_commit()
            self.assert_commit_message("Update /deletable_file")
        with gitfs_log("SyncWorker: Set push_successful"):
            os.remove(filename)
        with pull(self.sh):
            assert not os.path.exists(filename)
            self.assert_commit_message("Deleted /deletable_file")
| |
#! /usr/bin/python3
import quick2wire.i2c as twi
import os, math, time
slave = 0x60  # i2c slave address of the SI1132 sensor
interval = 1  # NOTE(review): unused below -- presumably a poll period; confirm

#i2c command table
PARTID = 0x00; UCOEF = [0x13, 0x14, 0x15, 0x16]
REVID = 0x01; PARAW = 0x17
SEQID = 0x02; COMM = 0x18
INTCFG = 0x03; RESP = 0x20
IRQEN = 0x04; IRSTA = 0x21
HWKEY = 0x07; VIDAT = [0x22, 0x23]
MRATE = [0x08, 0x09]; IRDAT = [0x24, 0x25]
PARAR = 0x2e; UVDAT = [0x2c, 0x2d]
CHSTAT = 0x30; INKEY = [0x3b, 0x3c, 0x3d, 0x3e]

#ram command table (parameter offsets used with QUERY/PSET below)
SLADDR = 0x00; IRMISC = 0x1f
CHLIST = 0x01
ALSENC = 0x06
IRADMUX = 0x0e
AUXMUX = 0x0f
VISADCO = 0x10
VISGAIN = 0x11
VISMISC = 0x12
IRADCO = 0x1d
IRGAIN = 0x1e

#command register extensions
QUERY = 0x80; NOP = 0x00; I2CADDR = 0x02; ALSFORCE = 0x06; ALSAUTO = 0x0e
PSET = 0xa0; RST = 0x01; GETCAL = 0x12; ALSPAUSE = 0x0a

#read ram_parameter: QUERY | offset makes the chip copy that byte to PARAR
RD_SLADDR = (QUERY | SLADDR); RD_VISGAIN = QUERY | VISGAIN
RD_CHLIST = QUERY | CHLIST; RD_VISMISC = QUERY | VISMISC
RD_ALSENC = QUERY | ALSENC; RD_IRADCO = QUERY | IRADCO
RD_IRADMUX = QUERY | IRADMUX; RD_IRGAIN = QUERY | IRGAIN
RD_AUXMUX = QUERY | AUXMUX; RD_IRMISC = QUERY | IRMISC
RD_VISADCO = QUERY | VISADCO

#write ram_parameter: PSET | offset makes the chip store PARAW at that offset
WR_SLADDR = PSET | SLADDR; WR_VISGAIN = PSET | VISGAIN
WR_CHLIST = PSET | CHLIST; WR_VISMISC = PSET | VISMISC
WR_ALSENC = PSET | ALSENC; WR_IRADCO = PSET | IRADCO
WR_IRADMUX = PSET | IRADMUX; WR_IRGAIN = PSET | IRGAIN
WR_AUXMUX = PSET | AUXMUX; WR_IRMISC = PSET | IRMISC
WR_VISADCO = PSET | VISADCO

#some constants
VIS = 0; IR = 1; UV = 2  # channel selectors for read_sensor_data()
UCOVAL = [0x7b, 0x6b, 0x01, 0x00]  # values written to the UCOEF registers at init
with twi.I2CMaster(1) as twibus:
    # ---- low-level i2c helpers ------------------------------------------
    def i2c_read(reg):
        """Read one byte from register `reg` of the sensor."""
        readvalue = twibus.transaction(
            twi.writing_bytes(slave, reg), twi.reading(slave, 1))
        return readvalue[0][0]

    def i2c_write(reg, value):
        """Write one byte `value` to register `reg`."""
        twibus.transaction(twi.writing_bytes(slave, reg, value))

    def i2c_read_more(reg, length):
        """Read `length` consecutive bytes starting at register `reg`."""
        return twibus.transaction(
            twi.writing_bytes(slave, reg), twi.reading(slave, length))

    # ---- parameter-ram access through the command register ---------------
    def ram_command_write(wr_ram_register, ram_value):
        """Write `ram_value` into parameter ram via command `wr_ram_register`.

        Pass a negative `ram_value` to issue the command without loading
        PARAW first.  Returns the non-zero RESP acknowledgement.
        """
        while True:
            i2c_write(COMM, NOP)  # clear RESP before issuing the command
            if ram_value >= 0:
                i2c_write(PARAW, ram_value)
            if i2c_read(RESP) == 0x00:
                i2c_write(COMM, wr_ram_register)
                rsp = i2c_read(RESP)
                if rsp != 0x00:
                    return rsp

    def ram_command_read(rd_ram_register):
        """Issue `rd_ram_register`; for QUERY commands return the parameter
        byte copied into PARAR, otherwise the RESP acknowledgement."""
        while True:
            i2c_write(COMM, NOP)
            if i2c_read(RESP) == 0x00:
                i2c_write(COMM, rd_ram_register)
                rsp = i2c_read(RESP)
                if rsp != 0x00:
                    if (rd_ram_register & QUERY) == QUERY:
                        return i2c_read(PARAR)
                    return rsp

    # ---- register-level command functions --------------------------------
    def get_part_id():
        """Return the chip's part-id register."""
        return i2c_read(PARTID)

    def get_revision():
        """Return the chip's revision register."""
        return i2c_read(REVID)

    def get_sequenz():
        """Return the chip's sequencer-id register."""
        return i2c_read(SEQID)

    def enable_int_output(state):
        """Write `state` to INT_CFG and return the read-back value."""
        i2c_write(INTCFG, state)
        return i2c_read(INTCFG)

    def enable_irq(state):
        """Write `state` to IRQ_ENABLE and return the read-back value."""
        i2c_write(IRQEN, state)
        return i2c_read(IRQEN)

    def set_int_out_pin(state):
        # NOTE(review): identical to enable_int_output(); kept so existing
        # callers continue to work.
        i2c_write(INTCFG, state)
        return i2c_read(INTCFG)

    def set_hardware_key():
        """Write the mandatory 0x17 key to HW_KEY; True when it sticks."""
        i2c_write(HWKEY, 0x17)
        return i2c_read(HWKEY) == 0x17

    def set_measure_rate(rate):
        """Program the 16-bit measurement rate (LSB first) and return the
        value read back from the device."""
        for i in range(2):
            i2c_write(MRATE[i], (rate >> (8 * i)) & 0xff)
        rate = i2c_read_more(MRATE[0], 2)
        return (rate[0][1] << 8) | rate[0][0]

    def read_irq_state():
        """Return the masked interrupt-status bits (0x01 | 0x20)."""
        return i2c_read(IRSTA) & 0x21

    def read_sensor_data(senstype):
        """Read a 16-bit sample for VIS, IR or UV and scale it.

        UV is returned as raw/100 (UV index); VIS and IR subtract a
        256-count offset and scale by 14.5 (per the original inline
        calibration comments), clamping to 0 below the offset.
        """
        global multi
        global sensword
        multi = 1
        sensword = 0
        if senstype == VIS:
            comm = VIDAT
            #multi = 14.5 / 0.282
        elif senstype == IR:
            comm = IRDAT
            #multi = 14.5 * 0.41
        elif senstype == UV:
            comm = UVDAT
        else:
            # bug fix: an unknown type previously crashed with NameError on
            # `comm`; fail with a clear message instead.
            raise ValueError("unknown sensor type: %r" % senstype)
        sensdata = i2c_read_more(comm[0], 2)
        sensword = (sensdata[0][1] << 8) | sensdata[0][0]
        if senstype == UV:
            sensword /= 100
        else:
            # identical scaling for the VIS and IR channels
            if sensword > 256:
                sensword = (sensword - 256) * 14.5
            else:
                sensword = 0
        return sensword

    def chip_state():
        """Return the masked chip-status bits (low three bits)."""
        return i2c_read(CHSTAT) & 0x07

    # ---- parameter-ram convenience wrappers ------------------------------
    # Each writes a value and returns the value read back from the device.
    def reset_ir_adcmux():
        ram_command_write(WR_IRADMUX, 0x00)
        return ram_command_read(RD_IRADMUX)

    def set_aux_mux():
        ram_command_write(WR_AUXMUX, 0x65)
        return ram_command_read(RD_AUXMUX)

    def set_ir_range():
        ram_command_write(WR_IRMISC, 0x20)
        return ram_command_read(RD_IRMISC)

    def set_vis_range():
        ram_command_write(WR_VISMISC, 0x20)
        return ram_command_read(RD_VISMISC)

    def reset_ir_range():
        ram_command_write(WR_IRMISC, 0x00)
        return ram_command_read(RD_IRMISC)

    def reset_vis_range():
        ram_command_write(WR_VISMISC, 0x00)
        return ram_command_read(RD_VISMISC)

    def set_als_mode(mode):
        """Issue an ALS mode command (e.g. ALSAUTO).

        Bug fix: previously returned the input `mode` instead of the
        device response.
        """
        ram_command_write(mode, -1)
        return ram_command_read(mode)

    def enable_als():
        """Enable the ALS channels (CHLIST = 0xf0)."""
        ram_command_write(WR_CHLIST, 0xf0)
        return ram_command_read(RD_CHLIST)

    def visible_gain(v):
        ram_command_write(WR_VISGAIN, v)
        return ram_command_read(RD_VISGAIN)

    def irda_gain(v):
        ram_command_write(WR_IRGAIN, v)
        return ram_command_read(RD_IRGAIN)

    def ir_adc_counter(c):
        # bug fix: the read-back value was computed but dropped (returned
        # None); return it like the sibling setters do.
        ram_command_write(WR_IRADCO, c)
        return ram_command_read(RD_IRADCO)

    def vis_adc_counter(c):
        # bug fix: same missing return as ir_adc_counter().
        ram_command_write(WR_VISADCO, c)
        return ram_command_read(RD_VISADCO)

    def init_sensor():
        """One-time setup: UCOEF values, HW key, measure rate, channel
        list, gains, ADC counters, autonomous ALS mode and muxes/ranges."""
        for i in range(4):
            i2c_write(UCOEF[i], UCOVAL[i])
        set_hardware_key()
        set_measure_rate(0x080a)
        enable_als()
        visible_gain(0x00)
        irda_gain(0x00)
        vis_adc_counter(0x70)
        ir_adc_counter(0x70)
        set_als_mode(ALSAUTO)
        reset_ir_adcmux()
        set_aux_mux()
        set_ir_range()
        set_vis_range()

    #tool to store data in files per timestamp
    def write_data_to(file=""):
        """Append one timestamped measurement line to `file`, or print the
        readings when no file is given.

        Bug fix: each channel is sampled exactly once and reused, so the
        logged VIS/IR values and the derived lux form one consistent
        snapshot (previously every use re-read the sensor). The file is
        now opened in a `with` block so it is closed even on error.
        """
        timestamp = time.asctime(time.localtime(time.time()))
        vis = read_sensor_data(VIS)
        ir = read_sensor_data(IR)
        uvindex = read_sensor_data(UV)
        # empirical combination of the two channels into a lux estimate
        lux = (vis * 6.33) + (ir * -0.094)
        if file != "":
            with open(file, "a") as filehandle:
                filehandle.write(
                    "time+%s+VIS+%6.1f+IR+%6.1f+UV+%3.1f+LUX+%6.1f\r\n"
                    % (timestamp, vis, ir, uvindex, lux))
        else:
            print("Visual value: %6ilux Infrared value: %6ilux UV-Index: %3.1f"
                  % (vis, ir, uvindex))

    # ---- main ------------------------------------------------------------
    init_sensor()
    time.sleep(1)  # settle time before taking the first reading
    write_data_to("/var/www/wetter/si1132.dat")
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks the all-reduce algorithms of tf_cnn_benchmarks.
tf_cnn_benchmarks uses all-reduce to aggregate gradients. This benchmark is
useful for benchmarking the performance of just this gradient aggregation,
instead of the entire model. All the flags that tf_cnn_benchmarks accepts are
also accepted by this script, although many are silently ignored.
The number and shapes of the tensors all-reduced are those of the variables of
the model specified by the --model flag.
TODO(reedwm): Allow custom sizes to be specified.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl import app
from absl import flags as absl_flags
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
import benchmark_cnn
import cnn_util
import flags
from cnn_util import log_fn
# Number of all-reduce copies executed inside one session.run(); raising it
# amortizes the per-step session overhead described in the help string.
absl_flags.DEFINE_integer('iters_per_step', 5,
                          'Number of iterations to run all-reduce for, per '
                          'step. Every step, a session will be run on a Graph '
                          'that contains this many copies of the all-reduce. '
                          'The copies are run sequentially. Setting this above '
                          '1 is useful to lower the overhead of starting the '
                          'session run, running the VariableV2 ops at the '
                          'start of the step, etc.')

flags.define_flags()
# Re-register every tf_cnn_benchmarks flag as a key flag of this module so
# absl accepts them when this script is the entry point.
for name in flags.param_specs.keys():
  absl_flags.declare_key_flag(name)
def get_var_shapes(model):
  """Returns the list of variable shapes for a tf_cnn_benchmarks Model."""
  with tf.Graph().as_default():
    # The variable shapes do not depend on the batch size, so a placeholder
    # input is enough to build the network once in a throwaway graph.
    images = tf.placeholder(tf.float32, model.get_input_shapes('train')[0])
    model.build_network([images])
    shapes = []
    for var in tf.trainable_variables():
      shapes.append([int(dim) for dim in var.shape.dims])
    return shapes
def all_reduce(all_device_tensors, variable_mgr):
  """Performs a single batch all-reduce.

  Args:
    all_device_tensors: List of lists of tensors. all_device_tensors[t][i] is
      a tensor, where t is the tower the tensor is on and i is the index of
      the tensor.
    variable_mgr: The VariableMgr to perform the all-reduce.
  Returns:
    List of list of tensors in the same form as `all_device_tensors`, except
    the tensors are aggregated across towers.
  """
  # preprocess_device_grads consumes (gradient, variable) pairs; the variable
  # slot is irrelevant for this benchmark, so fill it with None.
  tower_grads = []
  for device_tensors in all_device_tensors:
    tower_grads.append([(tensor, None) for tensor in device_tensors])
  _, aggregated_tower_grads = variable_mgr.preprocess_device_grads(tower_grads)
  # Strip the None variable slots back off before returning.
  aggregated_tensors = []
  for agg_device_tensors in aggregated_tower_grads:
    aggregated_tensors.append([grad for grad, _ in agg_device_tensors])
  return aggregated_tensors
def build_all_reduce_iterations(all_device_tensors, tower_devices, variable_mgr,
                                num_iters):
  """Builds the all-reduce ops for multiple iterations to aggregate tensors.

  The tensors in `all_device_tensors` are aggregated `num_iters` times. Each
  iteration aggregates the results from the previous iteration. The iterations
  are run sequentially, so the aggregations for an iteration do not start
  running until the previous iteration has completed. Each iteration after the
  first is aggregating already-aggregated values, but it does not matter because
  we are only aggregating for benchmarking purposes.

  Args:
    all_device_tensors: List of lists of tensors. all_device_tensors[t][i] is
      a tensor, where t is the tower the tensor is on and i is the index of
      the tensor.
    tower_devices: A list of device strings. tower_devices[t] is the device
      of the tensors in all_device_tensors[t].
    variable_mgr: The VariableMgr to perform the all-reduce.
    num_iters: Number of iterations to aggregate tensors for.
  Returns:
    An op that when run, causes the all-reduce ops to run.
  """
  for i in range(num_iters):
    with tf.name_scope('iteration_%d' % i):
      # Step 1: Do the aggregation.
      with tf.name_scope('tensor_aggregation'):
        all_device_tensors = all_reduce(all_device_tensors, variable_mgr)

      # Step 2. Create identity ops, to bring the aggregated results back to
      # each device.
      new_all_device_tensors = []
      for device, device_tensors in zip(tower_devices, all_device_tensors):
        with tf.device(device):
          new_all_device_tensors.append([
              tf.identity(t, name='identity_after_allreduce')
              for t in device_tensors
          ])
      all_device_tensors = new_all_device_tensors

      # Step 3. Add control dependencies to delay the next iteration until this
      # iteration is complete. To avoid extra overhead, we do not have any
      # cross-device control dependencies, which means it's possible for two
      # iterations to slightly overlap.
      new_all_device_tensors = []
      for device_tensors in all_device_tensors:
        new_all_device_tensors.append([
            control_flow_ops.with_dependencies(
                device_tensors, t, name='identity_after_dependencies')
            for t in device_tensors
        ])
      all_device_tensors = new_all_device_tensors

  # To prevent the dependency optimizer from removing every op we created,
  # we store the results in variables.
  ops_to_run = []
  for device, device_tensors in zip(tower_devices, all_device_tensors):
    with tf.device(device):
      for t in device_tensors:
        # The placeholder initial value is never run.
        # collections=[] keeps these variables out of the global-variables
        # collection, so the benchmark's initializer does not touch them.
        var = tf.Variable(tf.placeholder(tf.float32, t.shape), collections=[])
        ops_to_run.append(var.assign(t))
  return tf.group(*ops_to_run)
def build_graph(tower_devices, tensor_shapes, variable_mgr, num_iters):
  """Builds the graph for the benchmark.

  Args:
    tower_devices: A list of device strings of the devices to run the
      all-reduce benchmark on.
    tensor_shapes: A list of shapes of the tensors that will be aggregated for
      the all-reduce.
    variable_mgr: The VariableMgr to perform the all-reduce.
    num_iters: Number of iterations to aggregate tensors for.
  Returns:
    An op that runs the benchmark.
  """
  # One randomly-initialized variable per (shape, device) pair serves as the
  # all-reduce input.
  all_device_tensors = []
  for device_index, tower_device in enumerate(tower_devices):
    with tf.device(tower_device):
      device_tensors = [
          tf.Variable(tf.random_normal(shape, dtype=tf.float32),
                      name='tensor_%d_on_device_%d' % (shape_index,
                                                       device_index))
          for shape_index, shape in enumerate(tensor_shapes)]
    all_device_tensors.append(device_tensors)
  log_fn('Building all-reduce ops')
  benchmark_op = build_all_reduce_iterations(all_device_tensors, tower_devices,
                                             variable_mgr, num_iters)
  log_fn('Done building all-reduce ops')
  return benchmark_op
def run_graph(benchmark_op, bench_cnn, init_ops, dummy_loss_op):
  """Runs the graph for the benchmark.

  Args:
    benchmark_op: An op that runs the benchmark.
    bench_cnn: The BenchmarkCNN where params and other attributes are obtained.
    init_ops: A list of ops that are run before `benchmark_op` for
      initialization.
    dummy_loss_op: Any op. We must pass a loss op to
      `benchmark_cnn.benchmark_one_step`, but the result of the op is never
      actually used.
  """
  config = benchmark_cnn.create_config_proto(bench_cnn.params)
  with tf.Session(config=config) as sess:
    for op in init_ops:
      sess.run(op)
    step_train_times = []
    fetches = {'average_loss': dummy_loss_op, 'benchmark_op': benchmark_op}
    log_fn('Running warmup')
    # Negative steps are warmup; timing starts at step 0.
    for i in range(-bench_cnn.num_warmup_batches, bench_cnn.num_batches):
      if i == 0:
        log_fn('Running all-reduce ops')
        start = time.time()
      if i > 0 and i % bench_cnn.params.display_every == 0:
        log_fn('Iteration: %d. Average time per step so far: %s' %
               (i, (time.time() - start) / i))
      # Call benchmark_one_step instead of directly calling sess.run(...), to
      # potentially get a trace file, partitioned graphs, etc.
      benchmark_cnn.benchmark_one_step(
          sess=sess,
          fetches=fetches,
          step=i,
          # The batch size is only used for the images/sec calculation, which is
          # not actually calculated because we pass show_images_per_sec=False.
          batch_size=None,
          step_train_times=step_train_times,
          trace_filename=bench_cnn.trace_filename,
          partitioned_graph_file_prefix=(
              bench_cnn.params.partitioned_graph_file_prefix),
          profiler=None,
          image_producer=None,
          params=bench_cnn.params,
          show_images_per_sec=False)
    log_fn('Average time per step: %s' %
           ((time.time() - start) / bench_cnn.num_batches))
def run_benchmark(bench_cnn, num_iters):
  """Runs the all-reduce benchmark.

  Args:
    bench_cnn: The BenchmarkCNN where params, the variable manager, and other
      attributes are obtained.
    num_iters: Number of iterations to do all-reduce for.

  Raises:
    ValueError: Invalid params of bench_cnn.
  """
  if bench_cnn.params.variable_update != 'replicated':
    # Trailing space inside the first literal is required: without it the two
    # adjacent strings concatenated into '...to usethe all-reduce benchmark'.
    raise ValueError('--variable_update=replicated must be specified to use '
                     'the all-reduce benchmark')
  if bench_cnn.params.variable_consistency == 'relaxed':
    raise ValueError('--variable_consistency=relaxed is not supported')
  benchmark_op = build_graph(bench_cnn.raw_devices,
                             get_var_shapes(bench_cnn.model),
                             bench_cnn.variable_mgr, num_iters)
  init_ops = [
      tf.global_variables_initializer(),
      bench_cnn.variable_mgr.get_post_init_ops()
  ]
  # benchmark_one_step requires a loss op even though its value is unused here.
  loss_op = tf.no_op()
  if bench_cnn.graph_file:
    path, filename = os.path.split(bench_cnn.graph_file)
    # Any 'txt'-suffixed filename (e.g. '.txt', '.pbtxt') selects text format.
    as_text = filename.endswith('txt')
    log_fn('Writing GraphDef as %s to %s' % (
        'text' if as_text else 'binary', bench_cnn.graph_file))
    tf.train.write_graph(tf.get_default_graph().as_graph_def(add_shapes=True),
                         path, filename, as_text)
  run_graph(benchmark_op, bench_cnn, init_ops, loss_op)
# TODO(reedwm): Reduce redundancy with tf_cnn_benchmarks
def main(positional_arguments):
  """Entry point: builds a BenchmarkCNN from flags and runs the benchmark.

  Args:
    positional_arguments: sys.argv-style list; only the program name is
      permitted, anything further is rejected.

  Raises:
    ValueError: If extra positional arguments are supplied.
  """
  # Command-line arguments like '--distortions False' are equivalent to
  # '--distortions=True False', where False is a positional argument. To
  # prevent this from silently running with distortions, positional
  # arguments are not allowed.
  assert len(positional_arguments) >= 1
  unknown_args = positional_arguments[1:]
  if unknown_args:
    raise ValueError('Received unknown positional arguments: %s'
                     % unknown_args)
  params = benchmark_cnn.setup(benchmark_cnn.make_params_from_flags())
  bench = benchmark_cnn.BenchmarkCNN(params)
  major, minor = cnn_util.tensorflow_version_tuple()[0], \
      cnn_util.tensorflow_version_tuple()[1]
  log_fn('TensorFlow: %i.%i' % (major, minor))
  run_benchmark(bench, absl_flags.FLAGS.iters_per_step)
# absl's app.run parses and validates flags strictly before dispatching.
if __name__ == '__main__':
  app.run(main)  # Raises error on invalid flags, unlike tf.app.run()
| |
#!/usr/bin/env python
# encoding: utf-8
"""
The Play management
"""
import datetime
from mongokit import Document, DocumentMigration
class PlayMigration(DocumentMigration):
    """A DocumentMigration for the Play class.

    Each allmigrationNN_* method back-fills one field that was added to the
    Play schema after documents had already been stored; mongokit runs them
    by setting self.target (query) and self.update (modifier).
    """

    def __init__(self, *args):
        DocumentMigration.__init__(self, *args)

    def allmigration01_add_comment(self):
        """Add the comment field to all plays"""
        self.target = {'comment': {'$exists': False}}  # pylint: disable=W0201
        self.update = {'$set': {'comment': None}}  # pylint: disable=W0201

    def allmigration02_add_reason(self):
        """Add the winners_reason field to all plays"""
        self.target = {'winners_reason': {'$exists': False}}  # pylint: disable=W0201
        self.update = {'$set': {'winners_reason': []}}  # pylint: disable=W0201

    def allmigration03_add_created_by(self):
        """Add the created_by play field"""
        self.target = {'created_by': {'$exists': False}}  # pylint: disable=W0201
        self.update = {'$set': {'created_by': 'migration'}}  # pylint: disable=W0201
class Play(Document):
    """
    A database Play: one recorded game session with its players, scores,
    optional forced winners and per-play elo bookkeeping.
    """

    def __init__(self, *args, **kwargs):
        # Document needs a lot of parameters
        Document.__init__(self, *args, **kwargs)
        # store the elo per player (before, after)
        self.elos_per_player = {}

    # with this you will be able to use
    # play.date = blah
    # play.players = blah2
    use_dot_notation = True
    __collection__ = 'plays'
    structure = {
        'date': datetime.datetime,
        'game': basestring,
        'created_by': basestring,  # who created the play
        'winners': [basestring],  # a forced list of winners
        'winners_reason': [basestring],  # The forced list of winner reason
        'wintype': basestring,  # max or min
        'comment': basestring,  # A play comment
        'players': [
            {
                'login': basestring,
                'score': int,
                'role': basestring,
                'color': basestring,
                'team': basestring,
                'team_color': basestring
            }
        ]
    }
    required_fields = ['date', 'game']
    default_values = {
        'winners': [],
        'players': [],
        'wintype': 'max'
    }

    def set_date(self, date):
        """Set the date

        :type date: datetime.datetime"""
        self['date'] = date

    def set_game(self, game):
        """Set the game

        :type game: basestring"""
        self['game'] = game

    def set_created_by(self, creator):
        """Set the created_by field

        :type creator: basestring"""
        self['created_by'] = creator

    def add_player(self, player_dict):
        """Adds a new player to the play

        :type player_dict: dict
        {
            'login': basestring,
            'score': int,
            'role': basestring,
            'color': basestring,
            'team': basestring,
            'team_color': basestring
        }
        """
        self['players'].append(player_dict)

    @staticmethod
    def create_player(login, score, role=None, team=None):
        """Return a player instance
        suitable to be added using the add_player method

        :rtype: dict"""
        return {
            'login': login,
            'score': score,
            'role': role,
            'color': None,
            'team': team,
            'team_color': None
        }

    def get_player(self, login):
        """Return the player with the given login

        :raises ValueError: if no player has that login"""
        for player in self['players']:
            if player['login'] == login:
                return player
        raise ValueError('player with login %s not found' % login)

    def get_player_order(self):
        """Return a list of tuples [(score, [players])] ordered per score.

        Ascending when wintype is 'min', descending otherwise."""
        player_per_score = {}
        for (player, score) in [(player['login'], player['score'])
                                for player in self['players']]:
            if score not in player_per_score:
                player_per_score[score] = []
            player_per_score[score].append(player)
        # hasattr works because use_dot_notation exposes document keys as
        # attributes.
        if hasattr(self, 'wintype') and self['wintype'] == 'min':
            return sorted(player_per_score.items(), key=lambda x: x[0])
        return sorted(player_per_score.items(),
                      key=lambda x: x[0], reverse=True)

    def get_player_position(self, login):
        """Return the 1-based position of the player with the given login

        :type login: basestring
        :rtype: int
        :raises ValueError: if the login is not part of this play"""
        for index, score_players in enumerate(self.get_player_order()):
            players = score_players[1]
            if login in players:
                return index + 1
        raise ValueError('Player with login %s not found in play %s' % (login, self))

    def get_winners(self):
        """Return the list of player logins that win the play.

        A non-empty forced 'winners' list takes precedence; otherwise the
        best-scoring group (per wintype ordering) wins.

        :raises TypeError: if 'winners' is set but is not a list"""
        if self['winners'] is not None and \
           isinstance(self['winners'], list) and \
           self['winners'] != []:
            return self['winners']
        elif self['winners'] is not None and not isinstance(self['winners'], list):
            raise TypeError('Expected type for winners is list but found %s' %
                            type(self['winners']))
        order = self.get_player_order()
        if order != []:
            # reuse the already-computed order instead of recomputing it
            return order[0][1]
        return []

    def get_highest_score(self):
        "return the high score of the play (0 when there are no players)"
        order = self.get_player_order()
        if order != []:
            return order[0][0]
        return 0

    def get_lowest_score(self):
        "return the lowest score of the play (0 when there are no players)"
        order = self.get_player_order()
        if order != []:
            return order[-1][0]
        return 0

    # pylint: disable=C0103
    @property
    def id(self):
        """return the id"""
        return '%s' % self['_id']

    @property
    def is_max(self):
        """Return True if play has a maxtype score"""
        return 'wintype' in self and self['wintype'] == 'max'

    @property
    def teams(self):
        """Return the map of teams
        { name: team_name, players: [...]}
        """
        teams = dict()
        for player in self.players:
            team = player['team']
            if team not in teams:
                teams[team] = []
            teams[team].append(player['login'])
        return teams

    def set_elos(self, elos_per_player):
        """Set the elos per player

        :param elos_per_player: The elos per player where key is player login
            and value is a tuple (elo_pre_play, elo_post_play)
        :type elos_per_player: dict(basestring, tuple(int,int))"""
        self.elos_per_player = elos_per_player
| |
'''Arsenal Pyramid WSGI application.'''
# Copyright 2015 CityGrid Media, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import ConfigParser
import os
import time
from pyramid.config import Configurator
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.security import Allow, Authenticated
from pyramid.renderers import JSON
from pyramid_xml_renderer import XML
from sqlalchemy import engine_from_config
from sqlalchemy import event
from sqlalchemy.exc import DisconnectionError
import ldap
from .views import global_groupfinder
from .models.common import (
DBSession,
Base,
Group,
)
class RootFactory(object):
    '''Top level ACL class.

    Grants every authenticated principal view and tag permissions; further
    ACEs are appended at startup from Group rows in the database.
    '''
    # Additional ACLs loaded from the DB below
    __acl__ = [
        (Allow, Authenticated, ('view', 'tag_write', 'tag_delete'))
    ]

    def __init__(self, request):
        # Pyramid calls the root factory with the request; no per-request
        # state is needed.
        pass
def get_settings(global_config, settings):
    '''Read in settings from config files.

    Merges the [app:main] section of the secrets file and the [app:safe]
    section of the ini named by global_config into ``settings`` and returns
    the updated mapping.
    '''
    # Secrets first.
    secrets_parser = ConfigParser.ConfigParser()
    secrets_parser.read(settings['arsenal.secrets_file'])
    settings.update(secrets_parser.items("app:main"))
    # Then the safe (non-secret) settings.
    safe_parser = ConfigParser.SafeConfigParser()
    safe_parser.read(global_config)
    settings.update(safe_parser.items("app:safe"))
    return settings
def checkout_listener(dbapi_con, con_record, con_proxy):
    '''SQLAlchemy pool 'checkout' listener: ping the connection and raise
    DisconnectionError so the pool discards a stale connection.

    :param dbapi_con: raw DB-API connection being checked out
    :param con_record: pool connection record (unused)
    :param con_proxy: connection proxy (unused)
    '''
    try:
        try:
            # ping(False) disables the driver's auto-reconnect; older drivers
            # take no argument, hence the TypeError fallback below.
            dbapi_con.ping(False)
        except TypeError:
            dbapi_con.ping()
    except Exception, ex:
        import sys
        # NOTE(review): this prints the Exception class object itself, not the
        # caught exception's type — presumably intentional shorthand; verify.
        print >> sys.stderr, "Error: %s (%s)" % (Exception, ex)
        raise DisconnectionError()
def main(global_config, **settings):
    '''This function returns a Pyramid WSGI application.

    Reads settings, wires up the database engine, declares all UI and API
    routes, optionally configures LDAP auth, and loads group ACLs from the
    database before scanning views.
    '''
    settings = get_settings(global_config['__file__'], settings)
    # Have to do this because it can be a boolean or a string.
    if settings['arsenal.verify_ssl'] == 'True':
        settings['arsenal.verify_ssl'] = bool(settings['arsenal.verify_ssl'])
    if settings['arsenal.verify_ssl'] == 'False':
        settings['arsenal.verify_ssl'] = False
    log = logging.getLogger(__name__)
    engine = engine_from_config(settings, 'sqlalchemy.')
    # Discard stale pooled connections on checkout.
    event.listen(engine, 'checkout', checkout_listener)
    DBSession.configure(bind=engine)
    Base.metadata.bind = engine
    #
    # Routes
    #
    config = Configurator(settings=settings, root_factory=RootFactory)
    config.include('pyramid_chameleon')
    config.include('pyramid_ldap')
    config.add_static_view('static', 'static', cache_max_age=3600)
    config.add_route('home', '/')
    config.add_route('user', '/user')
    config.add_route('login', '/login')
    config.add_route('logout', '/logout')
    config.add_route('signup', '/signup')
    config.add_route('healthcheck', '/healthcheck')
    config.add_route('help', '/help')
    config.add_route('search', '/search')
    config.add_route('data_centers', '/data_centers')
    config.add_route('data_center', '/data_centers/{id}')
    config.add_route('data_centers_audit', '/data_centers_audit')
    config.add_route('data_center_audit', '/data_centers_audit/{id}')
    config.add_route('ip_addresses', '/ip_addresses')
    config.add_route('ip_address', '/ip_addresses/{id}')
    config.add_route('ip_addresses_audit', '/ip_addresses_audit')
    config.add_route('ip_address_audit', '/ip_addresses_audit/{id}')
    config.add_route('network_interfaces', '/network_interfaces')
    config.add_route('network_interface', '/network_interfaces/{id}')
    config.add_route('network_interfaces_audit', '/network_interfaces_audit')
    config.add_route('network_interface_audit', '/network_interfaces_audit/{id}')
    config.add_route('nodes', '/nodes')
    config.add_route('node', '/nodes/{id}')
    config.add_route('nodes_audit', '/nodes_audit')
    config.add_route('node_audit', '/nodes_audit/{id}')
    config.add_route('node_groups', '/node_groups')
    config.add_route('node_group', '/node_groups/{id}')
    config.add_route('node_groups_audit', '/node_groups_audit')
    config.add_route('node_group_audit', '/node_groups_audit/{id}')
    config.add_route('physical_locations', '/physical_locations')
    config.add_route('physical_location', '/physical_locations/{id}')
    config.add_route('physical_locations_audit', '/physical_locations_audit')
    config.add_route('physical_location_audit', '/physical_locations_audit/{id}')
    config.add_route('physical_devices', '/physical_devices')
    config.add_route('physical_device', '/physical_devices/{id}')
    config.add_route('physical_devices_audit', '/physical_devices_audit')
    config.add_route('physical_device_audit', '/physical_devices_audit/{id}')
    config.add_route('physical_elevations', '/physical_elevations')
    config.add_route('physical_elevation', '/physical_elevations/{id}')
    config.add_route('physical_elevations_audit', '/physical_elevations_audit')
    config.add_route('physical_elevation_audit', '/physical_elevations_audit/{id}')
    config.add_route('physical_racks', '/physical_racks')
    config.add_route('physical_rack', '/physical_racks/{id}')
    config.add_route('physical_racks_audit', '/physical_racks_audit')
    config.add_route('physical_rack_audit', '/physical_racks_audit/{id}')
    config.add_route('render_rack', '/render_rack')
    config.add_route('statuses', '/statuses')
    config.add_route('status', '/statuses/{id}')
    config.add_route('statuses_audit', '/statuses_audit')
    config.add_route('status_audit', '/statuses_audit/{id}')
    config.add_route('tags', '/tags')
    config.add_route('tag', '/tags/{id}')
    config.add_route('tags_audit', '/tags_audit')
    config.add_route('tag_audit', '/tags_audit/{id}')
    config.add_route('hardware_profiles', '/hardware_profiles')
    config.add_route('hardware_profile', '/hardware_profiles/{id}')
    config.add_route('hardware_profiles_audit', '/hardware_profiles_audit')
    config.add_route('hardware_profile_audit', '/hardware_profiles_audit/{id}')
    config.add_route('operating_systems', '/operating_systems')
    config.add_route('operating_system', '/operating_systems/{id}')
    config.add_route('operating_systems_audit', '/operating_systems_audit')
    config.add_route('operating_system_audit', '/operating_systems_audit/{id}')
    #
    # API Endpoints. Order matters.
    #
    # api_register is a special endpoint in order to use pyramid
    # security to control access to node registrations. Don't love it
    # but can't use request_param on a put request.
    config.add_route('api_register', '/api/register')
    config.add_route('api_enc', '/api/enc')
    config.add_route('api_data_centers', '/api/data_centers')
    config.add_route('api_data_center_r', '/api/data_centers/{id}/{resource}')
    config.add_route('api_data_center', '/api/data_centers/{id}')
    config.add_route('api_data_centers_audit', '/api/data_centers_audit')
    config.add_route('api_data_center_audit_r', '/api/data_centers_audit/{id}/{resource}')
    config.add_route('api_data_center_audit', '/api/data_centers_audit/{id}')
    config.add_route('api_nodes', '/api/nodes')
    config.add_route('api_node_r', '/api/nodes/{id}/{resource}')
    config.add_route('api_node', '/api/nodes/{id}')
    config.add_route('api_nodes_audit', '/api/nodes_audit')
    config.add_route('api_node_audit_r', '/api/nodes_audit/{id}/{resource}')
    config.add_route('api_node_audit', '/api/nodes_audit/{id}')
    config.add_route('api_statuses', '/api/statuses')
    config.add_route('api_status_r', '/api/statuses/{id}/{resource}')
    config.add_route('api_status', '/api/statuses/{id}')
    config.add_route('api_statuses_audit', '/api/statuses_audit')
    config.add_route('api_status_audit_r', '/api/statuses_audit/{id}/{resource}')
    config.add_route('api_status_audit', '/api/statuses_audit/{id}')
    config.add_route('api_tags', '/api/tags')
    config.add_route('api_tag_r', '/api/tags/{id}/{resource}')
    config.add_route('api_tag', '/api/tags/{id}')
    config.add_route('api_b_tags_deassign', '/api/bulk/tags/deassign')
    config.add_route('api_tags_audit', '/api/tags_audit')
    config.add_route('api_tag_audit_r', '/api/tags_audit/{id}/{resource}')
    config.add_route('api_tag_audit', '/api/tags_audit/{id}')
    config.add_route('api_hardware_profiles', '/api/hardware_profiles')
    config.add_route('api_hardware_profile_r', '/api/hardware_profiles/{id}/{resource}')
    config.add_route('api_hardware_profile', '/api/hardware_profiles/{id}')
    config.add_route('api_hardware_profiles_audit', '/api/hardware_profiles_audit')
    config.add_route('api_hardware_profile_audit_r', '/api/hardware_profiles_audit/{id}/{resource}')
    config.add_route('api_hardware_profile_audit', '/api/hardware_profiles_audit/{id}')
    config.add_route('api_ip_addresses', '/api/ip_addresses')
    config.add_route('api_ip_address_r', '/api/ip_addresses/{id}/{resource}')
    config.add_route('api_ip_address', '/api/ip_addresses/{id}')
    config.add_route('api_ip_addresses_audit', '/api/ip_addresses_audit')
    config.add_route('api_ip_address_audit_r', '/api/ip_addresses_audit/{id}/{resource}')
    config.add_route('api_ip_address_audit', '/api/ip_addresses_audit/{id}')
    config.add_route('api_operating_systems', '/api/operating_systems')
    config.add_route('api_operating_system_r', '/api/operating_systems/{id}/{resource}')
    config.add_route('api_operating_system', '/api/operating_systems/{id}')
    config.add_route('api_operating_systems_audit', '/api/operating_systems_audit')
    config.add_route('api_operating_system_audit_r', '/api/operating_systems_audit/{id}/{resource}')
    config.add_route('api_operating_system_audit', '/api/operating_systems_audit/{id}')
    config.add_route('api_physical_devices', '/api/physical_devices')
    config.add_route('api_physical_device_r', '/api/physical_devices/{id}/{resource}')
    config.add_route('api_physical_device', '/api/physical_devices/{id}')
    config.add_route('api_physical_devices_audit', '/api/physical_devices_audit')
    config.add_route('api_physical_device_audit_r', '/api/physical_devices_audit/{id}/{resource}')
    config.add_route('api_physical_device_audit', '/api/physical_devices_audit/{id}')
    config.add_route('api_physical_elevations', '/api/physical_elevations')
    config.add_route('api_physical_elevation_r', '/api/physical_elevations/{id}/{resource}')
    config.add_route('api_physical_elevation', '/api/physical_elevations/{id}')
    config.add_route('api_physical_elevations_audit', '/api/physical_elevations_audit')
    config.add_route('api_physical_elevation_audit_r', '/api/physical_elevations_audit/{id}/{resource}')
    config.add_route('api_physical_elevation_audit', '/api/physical_elevations_audit/{id}')
    config.add_route('api_physical_locations', '/api/physical_locations')
    config.add_route('api_physical_location_r', '/api/physical_locations/{id}/{resource}')
    config.add_route('api_physical_location', '/api/physical_locations/{id}')
    config.add_route('api_physical_locations_audit', '/api/physical_locations_audit')
    config.add_route('api_physical_location_audit_r', '/api/physical_locations_audit/{id}/{resource}')
    config.add_route('api_physical_location_audit', '/api/physical_locations_audit/{id}')
    config.add_route('api_physical_racks', '/api/physical_racks')
    config.add_route('api_physical_rack_r', '/api/physical_racks/{id}/{resource}')
    config.add_route('api_physical_rack', '/api/physical_racks/{id}')
    config.add_route('api_physical_racks_audit', '/api/physical_racks_audit')
    config.add_route('api_physical_rack_audit_r', '/api/physical_racks_audit/{id}/{resource}')
    config.add_route('api_physical_rack_audit', '/api/physical_racks_audit/{id}')
    config.add_route('api_node_groups', '/api/node_groups')
    config.add_route('api_node_group_r', '/api/node_groups/{id}/{resource}')
    config.add_route('api_node_group', '/api/node_groups/{id}')
    config.add_route('api_b_node_groups_deassign', '/api/bulk/node_groups/deassign')
    config.add_route('api_node_groups_audit', '/api/node_groups_audit')
    config.add_route('api_node_group_audit_r', '/api/node_groups_audit/{id}/{resource}')
    config.add_route('api_node_group_audit', '/api/node_groups_audit/{id}')
    config.add_route('api_hypervisor_vm_assignments', '/api/hypervisor_vm_assignments')
    config.add_route('api_hypervisor_vm_assignment_r', '/api/hypervisor_vm_assignments/{id}/{resource}')
    config.add_route('api_hypervisor_vm_assignment', '/api/hypervisor_vm_assignments/{id}')
    config.add_route('api_ec2_instances', '/api/ec2_instances')
    config.add_route('api_ec2_instance_r', '/api/ec2_instances/{id}/{resource}')
    config.add_route('api_ec2_instance', '/api/ec2_instances/{id}')
    config.add_route('api_ec2_instances_audit', '/api/ec2_instances_audit')
    config.add_route('api_ec2_instance_audit_r', '/api/ec2_instances_audit/{id}/{resource}')
    config.add_route('api_ec2_instance_audit', '/api/ec2_instances_audit/{id}')
    config.add_route('api_network_interfaces', '/api/network_interfaces')
    config.add_route('api_network_interface_r', '/api/network_interfaces/{id}/{resource}')
    config.add_route('api_network_interface', '/api/network_interfaces/{id}')
    config.add_route('api_network_interfaces_audit', '/api/network_interfaces_audit')
    config.add_route('api_network_interface_audit_r', '/api/network_interfaces_audit/{id}/{resource}')
    config.add_route('api_network_interface_audit', '/api/network_interfaces_audit/{id}')
    config.add_route('api_reports_db', '/api/reports/db')
    config.add_route('api_reports_nodes', '/api/reports/nodes')
    config.add_route('api_reports_stale_nodes', '/api/reports/stale_nodes')
    config.add_route('api_testing', '/api/testing')
    config.add_renderer('json', JSON(indent=2, sort_keys=True))
    config.add_renderer('xml', XML())
    if settings['arsenal.use_ldap']:
        log.info('Configuring ldap users and groups')
        # Load the cert if it's defined and exists
        if os.path.isfile(settings['arsenal.ldap_cert']):
            ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, settings['arsenal.ldap_cert'])
        config.ldap_setup(
            settings['arsenal.ldap_server'] + ':' + settings['arsenal.ldap_port'],
            bind=settings['arsenal.ldap_bind'],
            passwd=settings['arsenal.ldap_password'],
        )
        config.ldap_set_login_query(
            base_dn=settings['arsenal.login_base_dn'],
            filter_tmpl=settings['arsenal.login_filter'],
            scope=ldap.SCOPE_SUBTREE,
            cache_period=600,
        )
        config.ldap_set_groups_query(
            base_dn=settings['arsenal.group_base_dn'],
            filter_tmpl=settings['arsenal.group_filter'],
            scope=ldap.SCOPE_SUBTREE,
            cache_period=600,
        )
    config.set_authentication_policy(
        AuthTktAuthenticationPolicy(settings['arsenal.cookie_token'],
                                    callback=global_groupfinder,
                                    max_age=604800,
                                    hashalg='sha512')
    )
    config.set_authorization_policy(
        ACLAuthorizationPolicy()
    )
    # Load our groups and perms from the db and load them into the ACL
    max_retries = 10
    # range(1, max_retries + 1) so we really attempt max_retries times,
    # matching the '{0} of {1}' log message below.
    for retry in range(1, max_retries + 1):
        try:
            resp = DBSession.query(Group).all()
            for group in resp:
                gr_ass = group.get_all_assignments()
                if gr_ass:
                    gr_ass = tuple(gr_ass)
                    log.info('Adding group: {0} perm: {1}'.format(group.group_name,
                                                                  gr_ass))
                    # ACEs are tuples, consistent with the class-level
                    # __acl__ entry on RootFactory.
                    RootFactory.__acl__.append((Allow, group.group_name, gr_ass))
        except Exception as ex:
            log.warn('Unable to load ACLs from database({0} of {1})! Exception: '
                     '{2}'.format(retry,
                                  max_retries,
                                  repr(ex)))
            sleep_secs = 5
            log.warn('Sleeping {0} seconds before retrying'.format(sleep_secs))
            time.sleep(sleep_secs)
        else:
            break
    else:
        log.warn('Unable to load ACLs from database after {0} retries! '
                 'Continuing in an ACL-less universe.'.format(max_retries))
    config.scan()
    return config.make_wsgi_app()
| |
from __future__ import absolute_import
import logging
import threading
from collections import defaultdict, namedtuple
logger = logging.getLogger(__name__)

# (local, remote) consumer offset pair for one topic partition; either side
# may be None until the first update for that side arrives.
Offsets = namedtuple("Offsets", "local remote")


class InvalidState(Exception):
    """Raised when an operation is attempted in an incompatible partition state."""
    pass


class InvalidStateTransition(Exception):
    """Raised when new offsets imply a transition the state machine forbids."""
    pass


class MessageNotReady(Exception):
    """Raised for a message not yet committed by the remote consumer."""
    pass
class SynchronizedPartitionState:
    """Enumeration of local-vs-remote offset relationships for a partition."""

    # The ``SYNCHRONIZED`` state represents that the local offset is equal to
    # the remote offset. The local consumer should be paused to avoid advancing
    # further beyond the remote consumer.
    SYNCHRONIZED = "SYNCHRONIZED"

    # The ``LOCAL_BEHIND`` state represents that the remote offset is greater
    # than the local offset. The local consumer should be unpaused to avoid
    # falling behind the remote consumer.
    LOCAL_BEHIND = "LOCAL_BEHIND"

    # The ``REMOTE_BEHIND`` state represents that the local offset is greater
    # than the remote offset. The local consumer should be paused to avoid
    # advancing further beyond the remote consumer.
    REMOTE_BEHIND = "REMOTE_BEHIND"

    # The ``UNKNOWN`` state represents that we haven't received enough data to
    # know the current offset state.
    UNKNOWN = "UNKNOWN"
class SynchronizedPartitionStateManager(object):
    """
    This class implements a state machine that can be used to track the
    consumption progress of a Kafka partition (the "local" consumer) relative
    to a the progress of another consumer (the "remote" consumer.)

    This is intended to be paired with the ``SynchronizedConsumer``.
    """

    transitions = {  # from state -> set(to states)
        None: frozenset([SynchronizedPartitionState.UNKNOWN]),
        SynchronizedPartitionState.UNKNOWN: frozenset(
            [
                SynchronizedPartitionState.LOCAL_BEHIND,
                SynchronizedPartitionState.REMOTE_BEHIND,
                SynchronizedPartitionState.SYNCHRONIZED,
            ]
        ),
        SynchronizedPartitionState.REMOTE_BEHIND: frozenset(
            [SynchronizedPartitionState.LOCAL_BEHIND, SynchronizedPartitionState.SYNCHRONIZED]
        ),
        SynchronizedPartitionState.LOCAL_BEHIND: frozenset(
            [SynchronizedPartitionState.SYNCHRONIZED, SynchronizedPartitionState.REMOTE_BEHIND]
        ),
        SynchronizedPartitionState.SYNCHRONIZED: frozenset(
            [SynchronizedPartitionState.LOCAL_BEHIND, SynchronizedPartitionState.REMOTE_BEHIND]
        ),
    }

    def __init__(self, callback):
        # (state, Offsets) per (topic, partition); state is None until the
        # first update arrives.
        self.partitions = defaultdict(lambda: (None, Offsets(None, None)))
        self.callback = callback
        self.__lock = threading.RLock()

    def get_state_from_offsets(self, offsets):
        """
        Derive the partition state by comparing local and remote offsets.
        """
        if offsets.local is None or offsets.remote is None:
            return SynchronizedPartitionState.UNKNOWN
        elif offsets.local < offsets.remote:
            return SynchronizedPartitionState.LOCAL_BEHIND
        elif offsets.remote < offsets.local:
            return SynchronizedPartitionState.REMOTE_BEHIND
        else:  # local == remote
            return SynchronizedPartitionState.SYNCHRONIZED

    def __apply_offsets(
        self, topic, partition, previous_state, previous_offsets, updated_offsets, warn_remote_behind
    ):
        """
        Shared tail of ``set_local_offset``/``set_remote_offset``: validate the
        state transition, store the new (state, offsets) pair and invoke the
        callback on a state change. The caller must hold the lock.
        """
        updated_state = self.get_state_from_offsets(updated_offsets)
        if (
            previous_state is not updated_state
            and updated_state not in self.transitions[previous_state]
        ):
            raise InvalidStateTransition(
                u"Unexpected state transition for {}/{} from {} to {}".format(
                    topic, partition, previous_state, updated_state
                )
            )
        self.partitions[(topic, partition)] = (updated_state, updated_offsets)
        if previous_state is not updated_state:
            if (
                warn_remote_behind
                and updated_state == SynchronizedPartitionState.REMOTE_BEHIND
            ):
                logger.warning(
                    "Current local offset for %s/%s (%s) exceeds remote offset (%s)!",
                    topic,
                    partition,
                    updated_offsets.local,
                    updated_offsets.remote,
                )
            self.callback(
                topic,
                partition,
                (previous_state, previous_offsets),
                (updated_state, updated_offsets),
            )

    def set_local_offset(self, topic, partition, local_offset):
        """
        Update the local offset for a topic and partition.

        If this update operation results in a state change, the callback
        function will be invoked.
        """
        with self.__lock:
            previous_state, previous_offsets = self.partitions[(topic, partition)]
            if previous_offsets.local is not None and (
                local_offset is None or local_offset < previous_offsets.local
            ):
                logger.info(
                    "Local offset for %s/%s has moved backwards (current: %s, previous: %s)",
                    topic,
                    partition,
                    local_offset,
                    previous_offsets.local,
                )
            self.__apply_offsets(
                topic,
                partition,
                previous_state,
                previous_offsets,
                Offsets(local_offset, previous_offsets.remote),
                # Only the local consumer racing ahead of the remote consumer
                # warrants the REMOTE_BEHIND warning.
                warn_remote_behind=True,
            )

    def set_remote_offset(self, topic, partition, remote_offset):
        """
        Update the remote offset for a topic and partition.

        If this update operation results in a state change, the callback
        function will be invoked.
        """
        with self.__lock:
            previous_state, previous_offsets = self.partitions[(topic, partition)]
            if previous_offsets.remote is not None and (
                remote_offset is None or remote_offset < previous_offsets.remote
            ):
                logger.info(
                    "Remote offset for %s/%s has moved backwards (current: %s, previous: %s)",
                    topic,
                    partition,
                    remote_offset,
                    previous_offsets.remote,
                )
            self.__apply_offsets(
                topic,
                partition,
                previous_state,
                previous_offsets,
                Offsets(previous_offsets.local, remote_offset),
                warn_remote_behind=False,
            )

    def validate_local_message(self, topic, partition, offset):
        """
        Check if a message should be consumed by the local consumer.

        The local consumer should be prevented from consuming messages that
        have yet to have been committed by the remote consumer.
        """
        with self.__lock:
            state, offsets = self.partitions[(topic, partition)]
            if state is not SynchronizedPartitionState.LOCAL_BEHIND:
                raise InvalidState(
                    "Received a message while consumer is not in LOCAL_BEHIND state!"
                )
            if offset >= offsets.remote:
                raise MessageNotReady(
                    "Received a message that has not been committed by remote consumer"
                )
            if offset < offsets.local:
                logger.warning(
                    "Received a message prior to local offset (local consumer offset rewound without update?)"
                )
| |
""" Module to test exec_bowtie member methods.
This module contains unit tests for exec_bowtie.py.
"""
import os
import unittest
import shutil
from src import exec_bowtie
__author__ = "YiDing Fang"
__maintainer__ = "YiDing Fang"
__email__ = "yif017@eng.ucsd.edu"
__status__ = "prototype"
# input file contents. For future use.
_TRIM_R1_FASTQ_STR = """"""  # placeholder R1 fastq content (intentionally empty)
_TRIM_R2_FASTQ_STR = """"""  # placeholder R2 fastq content (intentionally empty)
# absolute path to the bowtie2 executable exercised by these tests
_BOWTIE_PATH = '/usr/local/bowtie2-2.2.9/bowtie2'
class TestExecBowtie(unittest.TestCase):
""" Unit test exec_megahit methods """
def setup_path(self):
""" create strings corresponding to the temporary directories and files to be used in unit tests """
# build executable extension
_BUILD_EXT = '-build'
# input directories
_EXAMPLE_DIR = 'example'
_INDEX_DIR = 'index'
_REFERENCE_DIR = 'reference'
_READS_DIR = 'reads'
# reference files base name
_REFERENCE_FA_STR = 'lambda_virus.fa'
# base name for the sample scaffold files found in ./bowtie
_SCAFFOLD_BASE_STR = 'lambda_virus'
# input reads
_SAMPLE_1_FQ_STR = 'reads_1.fq'
_SAMPLE_2_FQ_STR = 'reads_2.fq'
# output file name
_OUTPUT_SAM_STR = 'lambda_virus_sample.sam'
# temporary directories to be used
_UNITTEST_DIR_STR = 'bowtie_unittest_temp_dir'
_OUTPUT_DIR_STR = 'output'
_INPUT_DIR_STR = 'input'
# full file paths
self.bowtie_path = _BOWTIE_PATH
self.bowtie_build_path = self.bowtie_path + _BUILD_EXT
# full file paths
self.unittest_dir = _UNITTEST_DIR_STR
self.output_dir = os.path.join(_UNITTEST_DIR_STR, _OUTPUT_DIR_STR)
self.input_dir = os.path.join(_UNITTEST_DIR_STR, _INPUT_DIR_STR)
# output index directory and index bt2 files
self.output_index_dir = os.path.join(self.output_dir, _INDEX_DIR)
self.output_scaffold_index = os.path.join(self.output_index_dir, _SCAFFOLD_BASE_STR)
# output sam file
self.output_sam = os.path.join(self.output_dir, _OUTPUT_SAM_STR)
bowtie_dir, executable = os.path.split(_BOWTIE_PATH)
bowtie_example_dir = os.path.join(bowtie_dir, _EXAMPLE_DIR)
bowtie_index_dir = os.path.join(bowtie_example_dir, _INDEX_DIR)
bowtie_reference_dir = os.path.join(bowtie_example_dir, _REFERENCE_DIR)
bowtie_reads_dir = os.path.join(bowtie_example_dir, _READS_DIR)
self.index_dir = bowtie_index_dir
self.scaffold_index = os.path.join(bowtie_index_dir, _SCAFFOLD_BASE_STR)
self.sample_reference_fa = os.path.join(bowtie_reference_dir, _REFERENCE_FA_STR)
self.sample_fq_1 = os.path.join(bowtie_reads_dir, _SAMPLE_1_FQ_STR)
self.sample_fq_2 = os.path.join(bowtie_reads_dir, _SAMPLE_2_FQ_STR)
# TODO: Check if the OSError is thrown in case we remove something improperly
def clear_dir(self, target_dir):
""" Selectively remove files in a directory using the given file extension names """
# output file extensions
_OUT_EXT = ['.fq', '.fastq', '.fa', '.fasta', '.txt', '.lib', '.bin', '.info', '.lib_info',
'.log', '.tex', '.txt', '.tsv', '.pdf', '.sam', '.bt2']
if os.path.exists(target_dir):
# remove all the files in the intermediate contigs directory
filelist = [f for f in os.listdir(target_dir) if f.endswith(tuple(_OUT_EXT))]
for f in filelist:
f_path = os.path.join(target_dir, f)
os.remove(f_path)
def setUp(self):
""" create temporary files and directories to be used in the unit tests """
self.setup_path()
# create a sample directory to use for input and output
if not os.path.exists(self.unittest_dir):
os.makedirs(self.unittest_dir)
print("created directory: {0}".format(self.unittest_dir))
else:
print("There exists conflicting directory named: {0}".format(self.unittest_dir))
temp_dir_list = [self.input_dir, self.output_dir, self.output_index_dir]
for temp_dir in temp_dir_list:
# create the appropriate directories
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
print("created directory: {0}".format(temp_dir))
else:
print("There exists conflicting directory named: {0}".format(temp_dir))
input_test_files = [self.sample_fq_1, self.sample_fq_2, self.sample_reference_fa]
for test_file in input_test_files:
if not os.path.isfile(test_file):
raise ValueError( "Input file {0} does not exist. Please check metaquast/test_Data directory for sample test files".format(test_file))
def tearDown(self):
"""delete temporary files and directories generated by setUp method and megahit subprocess calls"""
if os.path.exists(self.unittest_dir):
if os.path.exists(self.input_dir):
self.clear_dir(self.input_dir)
os.rmdir(self.input_dir)
print("removed directory: {0}".format(self.input_dir))
if os.path.exists(self.output_dir):
if os.path.exists(self.output_sam):
os.remove(self.output_sam)
expected_sub_dir_list = [self.output_index_dir]
for sub_dir in expected_sub_dir_list:
if os.path.exists(sub_dir):
shutil.rmtree(sub_dir)
print("removed directory: {0}".format(sub_dir))
os.rmdir(self.output_dir)
print("removed directory: {0}".format(self.output_dir))
# remove the unittest directory
os.rmdir(self.unittest_dir)
print("removed directory: {0}".format(self.unittest_dir))
else:
print("The unittest directory {0} does not exist".format(self.unittest_dir))
# region form_bowtie_build_cmd_list tests
def test_form_bowtie_build_cmd_list_no_args(self):
"""test that the form_bowtie_build_cmd_list correctly raises a Value Error when invalid empty string is used in
place of required input"""
# arguments to be formatted
null_bowtie_build_path = ''
null_input_contigs_fa = ''
null_output_index = ''
with self.assertRaises(ValueError):
exec_bowtie.form_bowtie_build_cmd_list(null_bowtie_build_path, null_input_contigs_fa, null_output_index)
def test_form_bowtie_build_cmd_list_invalid_num_args(self):
"""test that form_bowtie_build_cmd_list correctly raises a Type Error when the wrong number of input arguments
is used"""
with self.assertRaises(TypeError):
exec_bowtie.form_bowtie_build_cmd_list(self.bowtie_path)
def test_form_bowtie_build_cmd_list(self):
"""test shall check that from_bowtie_build_cmd_list correctly generates bowtie command list when passed valid
arguments for the bowtie-build file path, input contigs fasta file, and output index directory"""
cmd_bowtie_build_list = ['/usr/local/bowtie2-2.2.9/bowtie2-build',
'/usr/local/bowtie2-2.2.9/example/reference/lambda_virus.fa',
'bowtie_unittest_temp_dir/output/index/lambda_virus']
self.assertEqual(cmd_bowtie_build_list, exec_bowtie.form_bowtie_build_cmd_list(self.bowtie_build_path,
self.sample_reference_fa,
self.output_scaffold_index))
# endregion
# region form_bowtie_cmd_list tests
def test_form_bowtie_cmd_list_no_args(self):
"""test that the form_bowtie_cmd_list correctly raises a Value Error when invalid empty string is used in place
of required input"""
# arguments to be formatted
null_bowtie_path = ''
null_index_path = ''
null_pe1_fastq = []
null_pe2_fastq = []
null_u_fastq = []
null_output_sam_path = ''
with self.assertRaises(ValueError):
exec_bowtie.form_bowtie_cmd_list(null_bowtie_path, null_index_path, null_pe1_fastq, null_pe2_fastq,
null_u_fastq, null_output_sam_path)
def test_form_bowtie_cmd_list_invalid_num_args(self):
"""test that form_bowtie_cmd_list correctly raises a Type Error when the wrong number of
input arguments is used"""
with self.assertRaises(TypeError):
exec_bowtie.form_bowtie_cmd_list(self.bowtie_path)
def test_form_bowtie_cmd_list(self):
"""test that form_bowtie_cmd_list correctly generates bowtie command list when passed valid arguments for
the bowtie file path, input index base name, input forward, reverse, and unpaired fastq files, and the
path to the output sam file"""
cmd_bowtie_list = ['/usr/local/bowtie2-2.2.9/bowtie2',
'-x', '/usr/local/bowtie2-2.2.9/example/index/lambda_virus',
'-1', '/usr/local/bowtie2-2.2.9/example/reads/reads_1.fq',
'-2', '/usr/local/bowtie2-2.2.9/example/reads/reads_2.fq',
'-S', 'bowtie_unittest_temp_dir/output/lambda_virus_sample.sam']
sample_fq_1 = [self.sample_fq_1]
sample_fq_2 = [self.sample_fq_2]
sample_fq_u = []
self.assertEqual(cmd_bowtie_list, exec_bowtie.form_bowtie_cmd_list(self.bowtie_path, self.scaffold_index,
sample_fq_1, sample_fq_2, sample_fq_u,
self.output_sam))
# endregion
# region run_bowtie_build_cmd_list
def test_run_bowtie_build_no_args(self):
"""test that run_bowtie correctly raises a Value Error when invalid empty string is used in place of
required input"""
# arguments to be formatted
null_bowtie_build_path = ''
null_input_contigs_fa = ''
null_output_index = ''
with self.assertRaises(ValueError):
exec_bowtie.run_bowtie_build(null_bowtie_build_path, null_input_contigs_fa, null_output_index)
def test_run_bowtie_build_invalid_num_args(self):
"""test that run_bowtie_build correctly raises a Type Error when the wrong number of input arguments
is used"""
with self.assertRaises(TypeError):
exec_bowtie.run_bowtie_build(self.bowtie_path)
def test_run_bowtie_build_with_existing_output_index(self):
"""test that run_bowtie_build correctly raises an OSError when the specified output index base name exists"""
_BOTIE_INDEX_FILE_EXT = '.bt2'
_SAMPLE_BOWTIE_STR = 'unittest test_run_bowtie_build_with_existing_output_index'
if not os.path.exists(self.output_index_dir):
os.mkdir(self.output_index_dir)
for x in range(1,7):
sample_index_str = self.output_scaffold_index + '.' + str(x) + _BOTIE_INDEX_FILE_EXT
sample_index_file = open(sample_index_str, 'w+')
sample_index_file.write(_SAMPLE_BOWTIE_STR)
sample_index_file.close()
with self.assertRaises(OSError):
exec_bowtie.run_bowtie_build(self.bowtie_build_path, self.sample_reference_fa, self.output_scaffold_index)
def test_run_bowtie_build_good_stderr(self):
"""test that bowtie2-build subprocess call does not report an execution Error when run_bowtie is passed valid
arguments for the bowtie path, input contigs, and output index base name"""
_BOWTIE_EXECUTION_ERROR = 'Error:'
stdout, stderr = exec_bowtie.run_bowtie_build(self.bowtie_build_path, self.sample_reference_fa,
self.output_scaffold_index)
self.assertEqual(stderr.find(_BOWTIE_EXECUTION_ERROR), -1)
def test_run_bowtie_build_index_exists(self):
"""test that bowtie2-build subprocess call correctly generates nonempty index directory when run_bowtie is
passed valid arguments for the bowtie path, input contigs, and output index base name"""
index_file_count = 0
output, err = exec_bowtie.run_bowtie_build(self.bowtie_build_path,
self.sample_reference_fa, self.output_scaffold_index)
if os.stat(self.output_index_dir) > 0:
for f in os.listdir(self.output_index_dir):
index_file_count += 1
self.assertTrue(index_file_count > 0)
# endregion
# region run_bowtie tests
def test_run_bowtie_no_args(self):
"""test that the form_bowtie_cmd_list correctly raises a Value Error when invalid empty string is used in place
of required input"""
# arguments to be formatted
null_bowtie_path = ''
null_index_path = ''
null_pe1_fastq = []
null_pe2_fastq = []
null_u_fastq = []
null_output_sam_path = ''
with self.assertRaises(ValueError):
exec_bowtie.run_bowtie(null_bowtie_path, null_index_path, null_pe1_fastq, null_pe2_fastq,
null_u_fastq, null_output_sam_path)
def test_run_bowtie_invalid_num_args(self):
"""test that form_bowtie_cmd_list correctly raises a Type Error when the wrong number of
input arguments is used"""
with self.assertRaises(TypeError):
exec_bowtie.run_bowtie(self.bowtie_path)
def test_run_bowtie_cmd_good_stderr(self):
"""test that bowtie2 subprocess call does not report execution errors when run_bowtie is passed valid
arguments for the bowtie file path, input index base name, input forward, reverse, and unpaired fastq files, and
the path to the output sam file"""
_BOWTIE_EXECUTION_ERROR = 'Error:'
sample_fq_1 = [self.sample_fq_1]
sample_fq_2 = [self.sample_fq_2]
sample_fq_u = []
stdout, stderr = exec_bowtie.run_bowtie(self.bowtie_path, self.scaffold_index,
sample_fq_1, sample_fq_2, sample_fq_u, self.output_sam)
self.assertEqual(stderr.find(_BOWTIE_EXECUTION_ERROR), -1)
def test_run_bowtie_cmd_list_output_sam_exists(self):
"""test that bowtie2 subprocess call generates the expected output sam file when run_bowtie is passed
valid arguments for the bowtie file path, input index base name, path to the output sam file, and
input forward, reverse, and unpaired fastq files"""
sample_fq_1 = [self.sample_fq_1]
sample_fq_2 = [self.sample_fq_2]
sample_fq_u = []
exec_bowtie.run_bowtie(self.bowtie_path, self.scaffold_index, sample_fq_1,
sample_fq_2, sample_fq_u, self.output_sam)
self.assertTrue(os.stat(self.output_sam) > 0)
# endregion
# region build_run_bowtie tests
def test_build_run_bowtie_no_args(self):
"""test that build_run_bowtie correctly raises a Value Error when invalid empty arguments are passed instead of
expected bowtie file path, reference contigs, output index base name, unpaired and paired end fastq, and an
output sam file path"""
# arguments to be formatted
null_bowtie_path = ''
null_input_contigs_fa = ''
null_index_path = ''
null_pe1_fastq = []
null_pe2_fastq = []
null_u_fastq = []
null_output_sam_path = ''
with self.assertRaises(ValueError):
exec_bowtie.build_run_bowtie(null_bowtie_path, null_input_contigs_fa, null_index_path, null_pe1_fastq,
null_pe2_fastq, null_u_fastq, null_output_sam_path)
def test_build_run_bowtie_invalid_num_args(self):
"""test that build_run_bowtie correctly raises a Type Error when the wrong number of arguments are passed"""
with self.assertRaises(TypeError):
exec_bowtie.build_run_bowtie(self.bowtie_path)
def test_build_run_bowtie_with_existing_output_index(self):
"""test that build_run_bowtie_exsiting_output_index correctly raises an OSError when
the specified output index base name exists"""
_BOTIE_INDEX_FILE_EXT = '.bt2'
_SAMPLE_BOWTIE_STR = 'unittest test_build_run_bowtie_with_existing_output_index'
sample_fq_1 = [self.sample_fq_1]
sample_fq_2 = [self.sample_fq_2]
sample_fq_u = []
if not os.path.exists(self.output_index_dir):
os.mkdir(self.output_index_dir)
for x in range(1,7):
sample_index_str = self.output_scaffold_index + '.' + str(x) + _BOTIE_INDEX_FILE_EXT
sample_index_file = open(sample_index_str, 'w+')
sample_index_file.write(_SAMPLE_BOWTIE_STR)
sample_index_file.close()
with self.assertRaises(OSError):
exec_bowtie.build_run_bowtie(self.bowtie_path, self.sample_reference_fa, self.output_scaffold_index,
sample_fq_1, sample_fq_2, sample_fq_u, self.output_sam)
def test_build_run_bowtie_cmd_good_stderr(self):
"""test that bowtie2-build and bowtie2 suprocess call do not report execution errors when build_run_bowtie is
passed valid arguments for the bowtie file path, input index base name, output to sam path,
input forward, reverse, and unpaired fastq files"""
_BOWTIE_EXECUTION_ERROR = 'Error:'
sample_fq_1 = [self.sample_fq_1]
sample_fq_2 = [self.sample_fq_2]
sample_fq_u = []
buildout, builderr, stdout, stderr = exec_bowtie.build_run_bowtie(self.bowtie_path, self.sample_reference_fa,
self.output_scaffold_index, sample_fq_1, sample_fq_2, sample_fq_u,
self.output_sam)
self.assertTrue(builderr.find(_BOWTIE_EXECUTION_ERROR) is -1 and
stderr.find(_BOWTIE_EXECUTION_ERROR) is -1)
def test_build_run_bowtie_index_exists(self):
"""test that bowtie2-build subprocess call generates nonempty output index directory when build_run_bowtie is
passed valid arguments for the bowtie file path, input index base name, output to sam path,
input forward, reverse, and unparied fastq files"""
index_file_count = 0
sample_fq_1 = [self.sample_fq_1]
sample_fq_2 = [self.sample_fq_2]
sample_fq_u = []
exec_bowtie.build_run_bowtie(self.bowtie_path, self.sample_reference_fa, self.output_scaffold_index,
sample_fq_1, sample_fq_2, sample_fq_u, self.output_sam)
if os.stat(self.output_index_dir) > 0:
for f in os.listdir(self.output_index_dir):
index_file_count += 1
self.assertTrue(index_file_count > 0)
def test_build_run_bowtie_output_sam_exists(self):
"""test that bowtie2-build subprocess call generates the expected output sam file when build_run_bowtie is
passed valid arguments for the bowtie file path, input index base name, output to sam path,
input forward, reverse, and unparied fastq files"""
sample_fq_1 = [self.sample_fq_1]
sample_fq_2 = [self.sample_fq_2]
sample_fq_u = []
exec_bowtie.build_run_bowtie(self.bowtie_path, self.sample_reference_fa, self.output_scaffold_index,
sample_fq_1, sample_fq_2, sample_fq_u, self.output_sam)
self.assertTrue(os.stat(self.output_sam) > 0)
# endregion
| |
''' a Triangle
'''
import math
import collections
import itertools
from . import Polygon, Point, Segment, Circle
from .constants import Epsilon, Half_Pi, nearly_eq, Sqrt_3
from .exceptions import *
class Triangle(Polygon):
    '''a pythonic Triangle

    Implements a Triangle object in the XY plane having three
    non-coincident vertices and three intersecting edges.

    Vertices are labeled; 'A', 'B' and 'C'.
    Edges are labeled; 'AB', 'BC' and 'AC'.

    The length of edges opposite each vertex are labeled:
       'a' for the side opposite vertex A.
       'b' for the side opposite vertex B.
       'c' for the side opposite vertex C.

    Interior angles in radians are labeled:
      'alpha' for CAB
      'beta' for ABC
      'gamma' for BCA

    Usage:
    >>> a = Triangle()
    >>> b = Triangle(A,B,C) # A,B,C are Points or Point equivalents
    >>> c = Triangle([p,q,r]) # p,q,r are Points or Point equivalents
    >>> d = Triangle([x,y,z],[x,y,z],[x,y,z])
    '''
    @classmethod
    def withAngles(cls, origin=None, base=1, alpha=None,
                   beta=None, gamma=None, inDegrees=False):
        '''
        :origin: optional Point
        :base: optional float, length of the base side AB
        :alpha: optional float, interior angle at vertex A
        :beta: optional float, interior angle at vertex B
        :gamma: optional float, interior angle at vertex C
        :inDegrees: optional boolean, angles are degrees if True else radians
        :return: Triangle initialized with points comprising the triangle
                 with the specified angles.
        '''
        raise NotImplementedError("withAngles")

    @classmethod
    def withSides(cls, origin=None, a=1, b=1, c=1):
        '''
        :origin: optional Point
        :a: optional float describing length of the side opposite A
        :b: optional float describing length of the side opposite B
        :c: optional float describing length of the side opposite C
        :return: Triangle initialized with points comprising the triangle
                 with the specified side lengths.

        If only 'a' is specified, an equilateral triangle is returned.
        '''
        raise NotImplementedError("withSides")

    @classmethod
    def unit(cls, scale=1):
        '''A triangle on the unit points, optionally scaled, Triangle.'''
        return cls(Point.units(scale))

    def __init__(self, *args, **kwds):
        '''
        :args: iterable of Point or Point equivalents
        :kwds: named Points where recognized names are 'A', 'B' and 'C'.

        If A is an iterable containing Point or Point equivalent objects
        it will be used to initialize up to three points in the triangle.
        '''
        kwds['defaults'] = Point(), Point(1, 0), Point(0, 1)
        super().__init__(*args, **kwds)
        # a triangle must have exactly three vertices
        if len(self) != 3:
            raise ValueError(len(self))

    @property
    def AB(self):
        return self.pairs('AB')

    @AB.setter
    def AB(self, iterable):
        self.A, self.B = iterable

    @property
    def BA(self):
        return self.pairs('BA')

    @BA.setter
    def BA(self, iterable):
        self.B, self.A = iterable

    @property
    def BC(self):
        return self.pairs('BC')

    @BC.setter
    def BC(self, iterable):
        self.B, self.C = iterable

    @property
    def CB(self):
        return self.pairs('CB')

    @CB.setter
    def CB(self, iterable):
        self.C, self.B = iterable

    @property
    def AC(self):
        return self.pairs('AC')

    @AC.setter
    def AC(self, iterable):
        self.A, self.C = iterable

    @property
    def CA(self):
        return self.pairs('CA')

    @CA.setter
    def CA(self, iterable):
        self.C, self.A = iterable

    @property
    def ABC(self):
        return [self.A, self.B, self.C]

    @ABC.setter
    def ABC(self, iterable):
        self.A, self.B, self.C = iterable

    @property
    def ccw(self):
        '''
        Result of A.ccw(B,C), float.
        See Point.ccw
        '''
        return self.A.ccw(self.B, self.C)

    @property
    def isCCW(self):
        '''
        True if ABC has a counter-clockwise rotation, boolean.
        '''
        return self.A.isCCW(self.B, self.C)

    @property
    def area(self):
        '''
        Area of the triangle, float.

        Performance note: computed via Triangle.ccw (subtractions and
        multiplications and a divison).
        '''
        return abs(self.ccw) / 2

    @property
    def heronsArea(self):
        '''
        Heron's forumla for computing the area of a triangle, float.

        Performance note: contains a square root.
        '''
        s = self.semiperimeter
        return math.sqrt(s * ((s - self.a) * (s - self.b) * (s - self.c)))

    @property
    def inradius(self):
        '''
        The radius of the triangle's incircle, float.
        '''
        return (self.area * 2) / self.perimeter

    @property
    def circumcenter(self):
        '''
        The intersection of the median perpendicular bisectors, Point.

        The center of the circumscribed circle, which is the circle that
        passes through all vertices of the triangle.

        https://en.wikipedia.org/wiki/Circumscribed_circle#Cartesian_coordinates_2

        BUG: only finds the circumcenter in the XY plane
        '''
        # for a right triangle the circumcenter is the hypotenuse's midpoint
        if self.isRight:
            return self.hypotenuse.midpoint
        # translate A to the origin so the reference formula applies
        if self.A.isOrigin:
            t = self
        else:
            t = Triangle(self.A - self.A, self.B - self.A, self.C - self.A)
        if not t.A.isOrigin:
            raise ValueError('failed to translate {} to origin'.format(t))
        BmulC = t.B * t.C.yx
        d = 2 * (BmulC.x - BmulC.y)
        bSqSum = sum((t.B ** 2).xy)
        cSqSum = sum((t.C ** 2).xy)
        # translate the computed center back by A
        x = (((t.C.y * bSqSum) - (t.B.y * cSqSum)) / d) + self.A.x
        y = (((t.B.x * cSqSum) - (t.C.x * bSqSum)) / d) + self.A.y
        return Point(x, y)

    @property
    def circumradius(self):
        '''
        Distance from the circumcenter to all the verticies in
        the Triangle, float.
        '''
        return (self.a * self.b * self.c) / (self.area * 4)

    @property
    def circumcircle(self):
        '''
        A circle whose center is equidistant from all the
        vertices of the triangle, Circle.
        '''
        return Circle(self.circumcenter, self.circumradius)

    @property
    def orthocenter(self):
        '''
        The intersection of the altitudes of the triangle, Point.
        '''
        raise NotImplementedError('orthocenter')

    @property
    def hypotenuse(self):
        '''
        The longest edge of the triangle, Segment.
        '''
        return max(self.edges(), key=lambda s: s.length)

    @property
    def alpha(self):
        '''
        The angle described by angle CAB in radians, float.
        '''
        return Segment(self.CA).radiansBetween(Segment(self.BA))

    @property
    def beta(self):
        '''
        The angle described by angle ABC in radians, float.
        '''
        return Segment(self.AB).radiansBetween(Segment(self.CB))

    @property
    def gamma(self):
        '''
        The angle described by angle BCA in radians, float.
        '''
        return Segment(self.BC).radiansBetween(Segment(self.AC))

    @property
    def angles(self):
        '''
        A list of the interior angles of the triangle, list of floats.
        '''
        return [self.alpha, self.beta, self.gamma]

    @property
    def a(self):
        '''
        The length of line segment BC, opposite vertex A, float.
        '''
        return abs(self.B.distance(self.C))

    @property
    def b(self):
        '''
        The length of line segment AC, opposite vertex B, float.
        '''
        return abs(self.A.distance(self.C))

    @property
    def c(self):
        '''
        The length of line segment AB, opposite vertex C, float.
        '''
        return abs(self.A.distance(self.B))

    @property
    def sides(self):
        '''
        A list of edge lengths [a, b, c], list of floats.
        '''
        return [self.a, self.b, self.c]

    @property
    def altitudes(self):
        '''
        A list of the altitudes of each vertex [AltA, AltB, AltC], list of
        floats.

        An altitude is the shortest distance from a vertex to the side
        opposite of it.
        '''
        A = self.area * 2
        return [A / self.a, A / self.b, A / self.c]

    @property
    def isEquilateral(self):
        '''
        True iff all side lengths are equal, boolean.
        '''
        return self.a == self.b == self.c

    @property
    def isIsosceles(self):
        '''
        True iff two side lengths are equal, boolean.
        '''
        return (self.a == self.b) or (self.a == self.c) or (self.b == self.c)

    @property
    def isScalene(self):
        '''
        True iff all side lengths are unequal, boolean.

        Bug fix: the original chained comparison ``a != b != c`` never
        compared ``a`` with ``c``, so a triangle with sides (3, 4, 3) was
        wrongly reported as scalene.
        '''
        return self.a != self.b and self.b != self.c and self.a != self.c

    @property
    def isRight(self):
        '''
        True if one angle measures 90 degrees (Pi/2 radians), float.
        '''
        return any(nearly_eq(v, Half_Pi) for v in self.angles)

    @property
    def isObtuse(self):
        '''
        True if one angle measures greater than 90 degrees (Pi/2 radians),
        float.
        '''
        return any(v > Half_Pi for v in self.angles)

    @property
    def isAcute(self):
        '''
        True iff all angles measure less than 90 degrees (Pi/2 radians),
        float.
        '''
        return all(v < Half_Pi for v in self.angles)

    def congruent(self, other):
        '''
        A congruent B

        True iff all angles of 'A' equal angles in 'B' and
        all side lengths of 'A' equal all side lengths of 'B', boolean.

        Bug fix: the original compared *sets* of angles and side lengths,
        which collapses repeated values; an isosceles triangle with sides
        (3, 3, 4) compared congruent to one with sides (3, 4, 4).  Sorted
        lists preserve multiplicity.
        '''
        return (sorted(self.angles) == sorted(other.angles) and
                sorted(self.sides) == sorted(other.sides))
| |
#!/usr/bin/env python
# -*- coding: utf-8; mode: python; -*-
"""Module providing abstract interface class for LSTM sense calssification.
Attributes:
LSTMBaseSenser (class):
abstract class defining interface for explicit and implicit classifier
"""
##################################################################
# Imports
from __future__ import absolute_import, print_function
from dsenser.nnbase import NNBaseSenser
from dsenser.theano_utils import floatX, theano, HE_UNIFORM, ORTHOGONAL, TT
import gc
import numpy as np
##################################################################
# Variables and Constants
TRUNCATE_GRADIENT = 20
##################################################################
# Class
class LSTMBaseSenser(NNBaseSenser):
    """Abstract class for LSTM disambiguation of relation senses.

    Attributes:
      n_y (int): number of distinct classes

    """

    def batch_predict(self, a_rels, a_data, a_ret):
        """Method for predicting sense of multiple relations.

        Args:
          a_rels (list):
            list of input relations
          a_data (2-tuple(dict, dict)):
            list of input JSON data
          a_ret (np.array):
            prediction matrix

        Returns:
          void:

        Note:
          updates ``a_ret`` in place

        """
        rels, parses = a_data
        # convert input relations to embedding indices
        if self.get_test_w_emb_i is None:
            self._init_wemb_funcs()
        # NOTE(review): indices are built from ``rels`` (from ``a_data``),
        # not from ``a_rels`` — confirm this is intentional
        irels = [self._rel2x(irel, parses, self.get_test_w_emb_i,
                             self.get_test_c_emb_i)
                 for irel in rels]
        # free the word2vec resources before prediction
        if self.w2v:
            self.w2v.unload()
            self.w2v = None
            gc.collect()
        # initialize prediction functions
        if self._predict_func is None:
            self._init_funcs()
        # make predictions
        # Bug fix: the original passed the undefined name ``a_i`` to
        # ``self._predict``, raising NameError; the row index ``i`` from
        # enumerate is the intended argument.
        for i, irel in enumerate(irels):
            self._predict(irel, a_ret, i)

    def _init_nn(self):
        """Initialize neural network.

        Builds the symbolic graph: word embeddings of both arguments are
        averaged over forward-LSTM outputs, concatenated with the
        connective's embedding, and fed through a softmax layer.
        """
        # NOTE(review): under true division this yields a float; presumably
        # the module targets Python 2 floor division — confirm
        self.intm_dim = max(100, self.ndim - (self.ndim - self.n_y) / 2)
        # indices of word embeddings
        self.W_INDICES_ARG1 = TT.ivector(name="W_INDICES_ARG1")
        self.W_INDICES_ARG2 = TT.ivector(name="W_INDICES_ARG2")
        # connective's index
        self.CONN_INDEX = TT.iscalar(name="CONN_INDEX")
        # initialize the matrix of word embeddings
        self.init_w_emb()
        # word embeddings of the arguments
        self.EMB_ARG1 = self.W_EMB[self.W_INDICES_ARG1]
        self.EMB_ARG2 = self.W_EMB[self.W_INDICES_ARG2]
        # connective's embedding
        self._init_conn_emb()
        self.EMB_CONN = self.CONN_EMB[self.CONN_INDEX]
        # initialize forward LSTM unit
        invars = ((self.EMB_ARG1, False), (self.EMB_ARG2, False))
        params, outvars = self._init_lstm(invars)
        self._params.extend(params)
        self.F_OUT_ARG1, self.F_OUT_ARG2 = outvars
        # mean-pool the LSTM outputs over time steps
        self.F_ARG1 = TT.mean(self.F_OUT_ARG1, axis=0)
        self.F_ARG2 = TT.mean(self.F_OUT_ARG2, axis=0)
        # define final units
        self.I = TT.concatenate((self.F_ARG1, self.F_ARG2,
                                 self.EMB_CONN))
        self.I2Y = theano.shared(value=HE_UNIFORM((self.n_y,
                                                   self.intm_dim * 3)),
                                 name="I2Y")
        self.y_bias = theano.shared(value=HE_UNIFORM((1, self.n_y)),
                                    name="y_bias")
        self._params.extend([self.I2Y, self.y_bias])
        self.Y_pred = TT.nnet.softmax(TT.dot(self.I2Y, self.I).T + self.y_bias)
        # initialize cost and optimization functions (squared error against
        # the gold label distribution)
        self.Y_gold = TT.vector(name="Y_gold")
        self._cost = TT.sum((self.Y_pred - self.Y_gold) ** 2)
        self._dev_cost = TT.sum((self.Y_pred - self.Y_gold) ** 2)
        self._pred_class = TT.argmax(self.Y_pred)
        grads = TT.grad(self._cost, wrt=self._params)
        self._init_funcs(grads)

    def _init_lstm(self, a_invars, a_sfx="-forward"):
        """Initialize LSTM layer.

        Args:
          a_invars (list(theano.shared)):
            list of input parameters as symbolic theano variable
          a_sfx (str):
            suffix to use for function and parameter names

        Returns:
          (2-tuple):
            parameters to be optimized and list of symbolic outputs from the
            function

        """
        intm_dim = self.intm_dim
        # initialize transformation matrices and bias term; the four stacked
        # blocks correspond to the input, forget, cell, and output gates
        W_dim = (intm_dim, self.ndim)
        W = np.concatenate([ORTHOGONAL(W_dim), ORTHOGONAL(W_dim),
                            ORTHOGONAL(W_dim), ORTHOGONAL(W_dim)],
                           axis=0)
        W = theano.shared(value=W, name="W" + a_sfx)
        U_dim = (intm_dim, intm_dim)
        U = np.concatenate([ORTHOGONAL(U_dim), ORTHOGONAL(U_dim),
                            ORTHOGONAL(U_dim), ORTHOGONAL(U_dim)],
                           axis=0)
        U = theano.shared(value=U, name="U" + a_sfx)
        V = ORTHOGONAL(U_dim)   # V for vendetta
        V = theano.shared(value=V, name="V" + a_sfx)
        b_dim = (1, intm_dim * 4)
        b = theano.shared(value=HE_UNIFORM(b_dim), name="b" + a_sfx)
        params = [W, U, V, b]
        # initialize dropout units
        w_do = theano.shared(value=floatX(np.ones((4 * intm_dim,))),
                             name="w_do")
        w_do = self._init_dropout(w_do)
        u_do = theano.shared(value=floatX(np.ones((4 * intm_dim,))),
                             name="u_do")
        u_do = self._init_dropout(u_do)

        # custom function for splitting up matrix parts
        def _slice(_x, n, dim):
            if _x.ndim == 3:
                return _x[:, :, n * dim:(n + 1) * dim]
            return _x[:, n * dim:(n + 1) * dim]

        # define recurrent LSTM unit
        def _step(x_, h_, c_,
                  W, U, V, b, w_do, u_do):
            """Recurrent LSTM unit.

            Note:
              The general order of function parameters to fn is:
              sequences (if any), prior result(s) (if needed),
              non-sequences (if any)

            Args:
              x_ (theano.shared): input vector
              h_ (theano.shared): output vector
              c_ (theano.shared): memory state
              W (theano.shared): input transform matrix
              U (theano.shared): inner-state transform matrix
              V (theano.shared): output transform matrix
              b (theano.shared): bias vector
              w_do (TT.col): dropout unit for the W matrix
              u_do (TT.col): dropout unit for the U matrix

            Returns:
              (2-tuple(h, c))
                new hidden and memory states

            """
            # pre-compute common terms:
            # W \in R^{236 x 100}
            # x \in R^{1 x 100}
            # U \in R^{236 x 59}
            # h \in R^{1 x 59}
            # b \in R^{1 x 236}
            # w_do \in R^{236 x 1}
            # u_do \in R^{236 x 1}
            # xhb \in R^{1 x 236}
            xhb = (TT.dot(W * w_do.dimshuffle((0, 'x')), x_.T) +
                   TT.dot(U * u_do.dimshuffle((0, 'x')), h_.T)).T + b
            # i \in R^{1 x 59}
            i = TT.nnet.sigmoid(_slice(xhb, 0, intm_dim))
            # f \in R^{1 x 59}
            f = TT.nnet.sigmoid(_slice(xhb, 1, intm_dim))
            # c \in R^{1 x 59}
            c = TT.tanh(_slice(xhb, 2, intm_dim))
            c = i * c + f * c_
            # V \in R^{59 x 59}
            # o \in R^{1 x 59}
            o = TT.nnet.sigmoid(_slice(xhb, 3, intm_dim) +
                                TT.dot(V, c.T).T)
            # h \in R^{1 x 59}
            h = o * TT.tanh(c)
            # return current output and memory state
            return h.flatten(), c.flatten()

        # run the recurrence over each input sequence, collecting the
        # hidden-state sequence for each argument
        m = 0
        n = intm_dim
        ov = None
        outvars = []
        for iv, igbw in a_invars:
            m = iv.shape[0]
            ret, _ = theano.scan(_step,
                                 sequences=[iv],
                                 outputs_info=[floatX(np.zeros((n,))),
                                               floatX(np.zeros((n,)))],
                                 non_sequences=[W, U, V, b, w_do, u_do],
                                 name="LSTM" + str(iv) + a_sfx,
                                 n_steps=m,
                                 truncate_gradient=TRUNCATE_GRADIENT,
                                 go_backwards=igbw)
            ov = ret[0]
            outvars.append(ov)
        return params, outvars
| |
"""
:Created: 15 August 2015
:Author: Lucas Connors
"""
from django.contrib.auth.models import User
from flamingo.tests import FlamingoTestCase, FlamingoTransactionTestCase
from users.exceptions import UserAlreadyExistsException
from users.forms import LoginForm, RegisterForm
from users.models import UserProfile
class UserTestCase(FlamingoTransactionTestCase):
    """Exercises UserProfile.objects.create_account and account uniqueness."""

    def testCreateAccount(self):
        # start from a clean slate so the counts below are exact
        UserProfile.objects.all().delete()
        UserProfile.objects.create_account(
            username=self.CREATED_USER_USERNAME,
            email=self.CREATED_USER_EMAIL,
            password=self.USER_PASSWORD,
            first_name=self.CREATED_USER_FIRST_NAME,
            last_name=self.CREATED_USER_LAST_NAME,
        )
        self.assertEqual(User.objects.all().count(), 1)
        self.assertEqual(UserProfile.objects.all().count(), 1)
        profile = UserProfile.objects.get()
        profile.bio = self.USER_PROFILE_BIO
        self.assertEqual(str(profile), self.CREATED_USER_USERNAME)
        user = profile.user
        self.assertEqual(user.username, self.CREATED_USER_USERNAME)
        self.assertEqual(user.email, self.CREATED_USER_EMAIL)
        self.assertEqual(user.first_name, self.CREATED_USER_FIRST_NAME)
        self.assertEqual(user.last_name, self.CREATED_USER_LAST_NAME)
        expected_full_name = "{first} {last}".format(
            first=self.CREATED_USER_FIRST_NAME, last=self.CREATED_USER_LAST_NAME
        )
        self.assertEqual(profile.full_name(), expected_full_name)
        # NOTE(review): bio was assigned locally just above, so this only
        # checks the attribute round-trips, not that it persists
        self.assertEqual(profile.bio, self.USER_PROFILE_BIO)

    def testDeletedAccountAvailableAgain(self):
        # deleting a profile frees its username/email for reuse
        self.user_profile.delete()
        UserProfile.objects.create_account(
            username=self.USER_USERNAME,
            email=self.USER_EMAIL,
            password=self.USER_PASSWORD,
        )
        self.assertEqual(User.objects.all().count(), 1)
        self.assertEqual(UserProfile.objects.all().count(), 1)

    def testCannotCreateAccountWithTakenUsername(self):
        with self.assertRaises(UserAlreadyExistsException):
            UserProfile.objects.create_account(
                username=self.USER_USERNAME,
                email=self.CREATED_USER_EMAIL,
                password=self.USER_PASSWORD,
            )

    def testCannotCreateAccountWithTakenEmail(self):
        with self.assertRaises(UserAlreadyExistsException):
            UserProfile.objects.create_account(
                username=self.CREATED_USER_USERNAME,
                email=self.USER_EMAIL,
                password=self.USER_PASSWORD,
            )
class UserAdminWebTestCase(FlamingoTestCase):
    """Smoke-tests that the users admin pages respond with HTTP 200."""

    def get200s(self):
        # URLs the framework base class will GET and expect a 200 from
        change_url = "/admin/users/userprofile/{userprofile_id}/change/".format(
            userprofile_id=self.user_profile.id
        )
        return [
            "/admin/users/",
            "/admin/users/userprofile/",
            "/admin/users/userprofile/add/",
            change_url,
        ]
class RegisterFormTestCase(FlamingoTestCase):
    """Validation failures for the registration form."""

    def testUsernameAlreadyTaken(self):
        data = {
            "username": self.USER_USERNAME,
            "email": self.CREATED_USER_EMAIL,
            "password": self.USER_PASSWORD,
            "password_confirm": self.USER_PASSWORD,
        }
        form = RegisterForm(data)
        self.assertFalse(form.is_valid())
        self.assertIn("username", form.errors)

    def testEmailAlreadyRegistered(self):
        data = {
            "username": self.CREATED_USER_USERNAME,
            "email": self.USER_EMAIL,
            "password": self.USER_PASSWORD,
            "password_confirm": self.USER_PASSWORD,
        }
        form = RegisterForm(data)
        self.assertFalse(form.is_valid())
        self.assertIn("email", form.errors)

    def testPasswordsDoNotMatch(self):
        data = {
            "username": self.CREATED_USER_USERNAME,
            "email": self.CREATED_USER_EMAIL,
            "password": self.USER_PASSWORD,
            "password_confirm": "oops123",
        }
        form = RegisterForm(data)
        self.assertFalse(form.is_valid())
        # mismatch is reported as a single non-field error
        self.assertIn("__all__", form.errors)
        errors = form.errors["__all__"]
        self.assertEqual(len(errors), 1)
        error = errors[0]
        self.assertIn("Password", error)
        self.assertIn("not match", error)
class RegisterWebTestCase(FlamingoTestCase):
    """End-to-end behavior of the /register page."""

    def testRegisterPageRenders(self):
        # anonymous visitors can see the registration page
        self.client.logout()
        self.assertResponseRenders("/register")

    def testUserRegisters(self):
        self.client.logout()
        UserProfile.objects.all().delete()
        self.assertResponseRenders("/register")
        payload = {
            "username": self.CREATED_USER_USERNAME,
            "email": self.CREATED_USER_EMAIL,
            "password": self.USER_PASSWORD,
            "password_confirm": self.USER_PASSWORD,
            "first_name": self.CREATED_USER_FIRST_NAME,
            "last_name": self.CREATED_USER_LAST_NAME,
        }
        # successful registration redirects home and creates the profile
        self.assertResponseRedirects("/register", "/", method="POST", data=payload)
        self.assertEqual(UserProfile.objects.all().count(), 1)

    def testRedirectsAuthenticatedUsersToHome(self):
        self.assertResponseRedirects("/register", "/")
class LoginFormTestCase(FlamingoTestCase):
    """Authentication failures for the login form."""

    UNCREATED_USER_USERNAME = "UncreatedUser"
    UNCREATED_USER_EMAIL = "uncreated@example.com"

    def _assertFailedAuth(self, form_data):
        # shared check: form is invalid with the single generic auth warning
        form = LoginForm(form_data)
        self.assertFalse(form.is_valid())
        self.assertIn("__all__", form.errors)
        self.assertEqual(len(form.errors["__all__"]), 1)
        (error,) = form.errors["__all__"]
        self.assertEqual(error, LoginForm.FAILED_AUTH_WARNING)

    def testUsernameDoesNotExist(self):
        self._assertFailedAuth({
            "username": self.UNCREATED_USER_USERNAME,
            "password": self.USER_PASSWORD,
        })

    def testEmailDoesNotExist(self):
        self._assertFailedAuth({
            "username": self.UNCREATED_USER_EMAIL,
            "password": self.USER_PASSWORD,
        })

    def testPasswordIncorrect(self):
        self._assertFailedAuth({"username": self.USER_USERNAME, "password": "oops123"})
class LoginWebTestCase(FlamingoTestCase):
    """Integration tests for the /login page."""

    def testLoginPageRenders(self):
        self.client.logout()
        self.assertResponseRenders("/login")

    def testUserLogsIn(self):
        self.client.logout()
        self.assertResponseRenders("/login")
        credentials = {"username": self.USER_USERNAME,
                       "password": self.USER_PASSWORD}
        self.assertResponseRedirects(
            "/login", "/", method="POST", data=credentials)

    def testUserLogsInWithEmail(self):
        self.client.logout()
        self.assertResponseRenders("/login")
        credentials = {"username": self.USER_EMAIL,
                       "password": self.USER_PASSWORD}
        self.assertResponseRedirects(
            "/login", "/", method="POST", data=credentials)

    def testRedirectsAuthenticatedUsersToHome(self):
        self.assertResponseRedirects("/login", "/")
class LogoutWebTestCase(FlamingoTestCase):
    """Integration tests for the /logout endpoint."""

    def testRedirectsAfterLogoutToLogin(self):
        self.assertResponseRedirects("/logout", "/login")

    def testRedirectsUnauthenticatedUsersToLogin(self):
        # Logging out while already logged out still lands on /login.
        self.client.logout()
        self.assertResponseRedirects("/logout", "/login")
class ProfileWebTestCase(FlamingoTestCase):
    """Integration tests for the /profile page."""

    def get200s(self):
        # Pages that must render with HTTP 200 for an authenticated user.
        return ["/profile"]

    def testRedirectsUnauthenticatedUsersToLogin(self):
        self.client.logout()
        self.assertResponseRedirects("/profile", "/login/")
| |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved
# Copyright (c) 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import time
from oslo.config import cfg
from nova import conductor
from nova import context
from nova.db import base
from nova import exception
from nova.network import api as network_api
from nova.network import model as network_model
from nova.network import quantumv2
from nova.network.security_group import openstack_driver
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
# Configuration options controlling how nova connects to the Quantum
# (later renamed Neutron) v2 network service.
quantum_opts = [
    cfg.StrOpt('quantum_url',
               default='http://127.0.0.1:9696',
               help='URL for connecting to quantum'),
    cfg.IntOpt('quantum_url_timeout',
               default=30,
               help='timeout value for connecting to quantum in seconds'),
    cfg.StrOpt('quantum_admin_username',
               help='username for connecting to quantum in admin context'),
    cfg.StrOpt('quantum_admin_password',
               help='password for connecting to quantum in admin context',
               secret=True),
    cfg.StrOpt('quantum_admin_tenant_name',
               help='tenant name for connecting to quantum in admin context'),
    cfg.StrOpt('quantum_region_name',
               help='region name for connecting to quantum in admin context'),
    cfg.StrOpt('quantum_admin_auth_url',
               default='http://localhost:5000/v2.0',
               help='auth url for connecting to quantum in admin context'),
    cfg.BoolOpt('quantum_api_insecure',
                default=False,
                help='if set, ignore any SSL validation issues'),
    cfg.StrOpt('quantum_auth_strategy',
               default='keystone',
               help='auth strategy for connecting to '
                    'quantum in admin context'),
    # TODO(berrange) temporary hack until Quantum can pass over the
    # name of the OVS bridge it is configured with
    cfg.StrOpt('quantum_ovs_bridge',
               default='br-int',
               help='Name of Integration Bridge used by Open vSwitch'),
    cfg.IntOpt('quantum_extension_sync_interval',
               default=600,
               help='Number of seconds before querying quantum for'
                    ' extensions'),
]

CONF = cfg.CONF
CONF.register_opts(quantum_opts)
# Options declared by other nova modules that this module reads.
CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')
CONF.import_opt('flat_injected', 'nova.network.manager')

LOG = logging.getLogger(__name__)

# Network attribute flagging a network as external (router:external
# extension); external networks serve as floating-ip pools.
NET_EXTERNAL = 'router:external'

# Aliases for helpers shared with the generic nova network API module.
refresh_cache = network_api.refresh_cache
update_instance_info_cache = network_api.update_instance_cache_with_nw_info
class API(base.Base):
    """API for interacting with the quantum 2.x API."""

    # Conductor proxy used to fire security-group refresh handlers.
    conductor_api = conductor.API()
    # Security-group driver shared by all instances of this class.
    security_group_api = openstack_driver.get_openstack_security_group_driver()

    def __init__(self):
        super(API, self).__init__()
        # time.time() of the last extension-list fetch; None forces a
        # refresh on first use.
        self.last_quantum_extension_sync = None
        # Mapping of extension name -> extension dict reported by quantum.
        self.extensions = {}

    def setup_networks_on_host(self, context, instance, host=None,
                               teardown=False):
        """Setup or teardown the network structures.

        Intentionally a no-op: quantum requires no per-host setup here.
        """
    def _get_available_networks(self, context, project_id,
                                net_ids=None):
        """Return a network list available for the tenant.

        The list contains networks owned by the tenant and public networks.
        If net_ids specified, it searches networks with requested IDs only.
        """
        quantum = quantumv2.get_client(context)

        # If user has specified to attach instance only to specific
        # networks, add them to **search_opts
        # (1) Retrieve non-public network list owned by the tenant.
        search_opts = {"tenant_id": project_id, 'shared': False}
        if net_ids:
            search_opts['id'] = net_ids
        nets = quantum.list_networks(**search_opts).get('networks', [])
        # (2) Retrieve public network list.
        search_opts = {'shared': True}
        if net_ids:
            search_opts['id'] = net_ids
        nets += quantum.list_networks(**search_opts).get('networks', [])

        # Re-order in place so the result matches the caller's requested
        # network ordering.
        _ensure_requested_network_ordering(
            lambda x: x['id'],
            nets,
            net_ids)

        return nets
def allocate_for_instance(self, context, instance, **kwargs):
"""Allocate network resources for the instance.
TODO(someone): document the rest of these parameters.
:param macs: None or a set of MAC addresses that the instance
should use. macs is supplied by the hypervisor driver (contrast
with requested_networks which is user supplied).
NB: QuantumV2 currently assigns hypervisor supplied MAC addresses
to arbitrary networks, which requires openflow switches to
function correctly if more than one network is being used with
the bare metal hypervisor (which is the only one known to limit
MAC addresses).
"""
hypervisor_macs = kwargs.get('macs', None)
available_macs = None
if hypervisor_macs is not None:
# Make a copy we can mutate: records macs that have not been used
# to create a port on a network. If we find a mac with a
# pre-allocated port we also remove it from this set.
available_macs = set(hypervisor_macs)
quantum = quantumv2.get_client(context)
LOG.debug(_('allocate_for_instance() for %s'),
instance['display_name'])
if not instance['project_id']:
msg = _('empty project id for instance %s')
raise exception.InvalidInput(
reason=msg % instance['display_name'])
requested_networks = kwargs.get('requested_networks')
ports = {}
fixed_ips = {}
net_ids = []
if requested_networks:
for network_id, fixed_ip, port_id in requested_networks:
if port_id:
port = quantum.show_port(port_id)['port']
if hypervisor_macs is not None:
if port['mac_address'] not in hypervisor_macs:
raise exception.PortNotUsable(port_id=port_id,
instance=instance['display_name'])
else:
# Don't try to use this MAC if we need to create a
# port on the fly later. Identical MACs may be
# configured by users into multiple ports so we
# discard rather than popping.
available_macs.discard(port['mac_address'])
network_id = port['network_id']
ports[network_id] = port
elif fixed_ip and network_id:
fixed_ips[network_id] = fixed_ip
if network_id:
net_ids.append(network_id)
nets = self._get_available_networks(context, instance['project_id'],
net_ids)
security_groups = kwargs.get('security_groups', [])
security_group_ids = []
# TODO(arosen) Should optimize more to do direct query for security
# group if len(security_groups) == 1
if len(security_groups):
search_opts = {'tenant_id': instance['project_id']}
user_security_groups = quantum.list_security_groups(
**search_opts).get('security_groups')
for security_group in security_groups:
name_match = None
uuid_match = None
for user_security_group in user_security_groups:
if user_security_group['name'] == security_group:
if name_match:
msg = (_("Multiple security groups found matching"
" '%s'. Use an ID to be more specific."),
security_group)
raise exception.NoUniqueMatch(msg)
name_match = user_security_group['id']
if user_security_group['id'] == security_group:
uuid_match = user_security_group['id']
# If a user names the security group the same as
# another's security groups uuid, the name takes priority.
if not name_match and not uuid_match:
raise exception.SecurityGroupNotFound(
security_group_id=security_group)
security_group_ids.append(name_match)
elif name_match:
security_group_ids.append(name_match)
elif uuid_match:
security_group_ids.append(uuid_match)
touched_port_ids = []
created_port_ids = []
for network in nets:
# If security groups are requested on an instance then the
# network must has a subnet associated with it. Some plugins
# implement the port-security extension which requires
# 'port_security_enabled' to be True for security groups.
# That is why True is returned if 'port_security_enabled'
# is not found.
if (security_groups and not (
network['subnets']
and network.get('port_security_enabled', True))):
raise exception.SecurityGroupCannotBeApplied()
network_id = network['id']
zone = 'compute:%s' % instance['availability_zone']
port_req_body = {'port': {'device_id': instance['uuid'],
'device_owner': zone}}
try:
port = ports.get(network_id)
if port:
quantum.update_port(port['id'], port_req_body)
touched_port_ids.append(port['id'])
else:
if fixed_ips.get(network_id):
port_req_body['port']['fixed_ips'] = [{'ip_address':
fixed_ip}]
port_req_body['port']['network_id'] = network_id
port_req_body['port']['admin_state_up'] = True
port_req_body['port']['tenant_id'] = instance['project_id']
if security_group_ids:
port_req_body['port']['security_groups'] = (
security_group_ids)
if available_macs is not None:
if not available_macs:
raise exception.PortNotFree(
instance=instance['display_name'])
mac_address = available_macs.pop()
port_req_body['port']['mac_address'] = mac_address
self._populate_quantum_extension_values(instance,
port_req_body)
created_port_ids.append(
quantum.create_port(port_req_body)['port']['id'])
except Exception:
with excutils.save_and_reraise_exception():
for port_id in touched_port_ids:
port_in_server = quantum.show_port(port_id).get('port')
if not port_in_server:
raise Exception(_('Port not found'))
port_req_body = {'port': {'device_id': None}}
quantum.update_port(port_id, port_req_body)
for port_id in created_port_ids:
try:
quantum.delete_port(port_id)
except Exception as ex:
msg = _("Fail to delete port %(portid)s with"
" failure: %(exception)s")
LOG.debug(msg, {'portid': port_id,
'exception': ex})
self.trigger_security_group_members_refresh(context, instance)
self.trigger_instance_add_security_group_refresh(context, instance)
nw_info = self.get_instance_nw_info(context, instance, networks=nets,
conductor_api=kwargs.get('conductor_api'))
# NOTE(danms): Only return info about ports we created in this run.
# In the initial allocation case, this will be everything we created,
# and in later runs will only be what was created that time. Thus,
# this only affects the attach case, not the original use for this
# method.
return network_model.NetworkInfo([port for port in nw_info
if port['id'] in created_port_ids])
    def _refresh_quantum_extensions_cache(self):
        """Refresh the cached list of quantum API extensions.

        Re-fetches at most once every quantum_extension_sync_interval
        seconds; otherwise the cached self.extensions mapping is reused.
        """
        if (not self.last_quantum_extension_sync or
            ((time.time() - self.last_quantum_extension_sync)
             >= CONF.quantum_extension_sync_interval)):
            quantum = quantumv2.get_client(context.get_admin_context())
            extensions_list = quantum.list_extensions()['extensions']
            self.last_quantum_extension_sync = time.time()
            self.extensions.clear()
            # Index extensions by name for quick membership tests.
            self.extensions = dict((ext['name'], ext)
                                   for ext in extensions_list)

    def _populate_quantum_extension_values(self, instance, port_req_body):
        """Add extension-specific attributes to a port request body.

        Currently only the NVP QoS extension is handled: when present,
        the instance type's rxtx_factor is copied onto the port.
        """
        self._refresh_quantum_extensions_cache()
        if 'nvp-qos' in self.extensions:
            rxtx_factor = instance['instance_type'].get('rxtx_factor')
            port_req_body['port']['rxtx_factor'] = rxtx_factor
    def deallocate_for_instance(self, context, instance, **kwargs):
        """Deallocate all network resources related to the instance."""
        LOG.debug(_('deallocate_for_instance() for %s'),
                  instance['display_name'])
        search_opts = {'device_id': instance['uuid']}
        data = quantumv2.get_client(context).list_ports(**search_opts)
        ports = data.get('ports', [])
        for port in ports:
            try:
                quantumv2.get_client(context).delete_port(port['id'])
            except Exception as ex:
                # Best effort: log and keep deleting the remaining ports.
                LOG.exception(_("Failed to delete quantum port %(portid)s ")
                              % {'portid': port['id']})

        self.trigger_security_group_members_refresh(context, instance)
        self.trigger_instance_remove_security_group_refresh(context, instance)
    def allocate_port_for_instance(self, context, instance, port_id,
                                   network_id=None, requested_ip=None,
                                   conductor_api=None):
        """Allocate one port for the instance (attach-interface path)."""
        return self.allocate_for_instance(context, instance,
                requested_networks=[(network_id, requested_ip, port_id)],
                conductor_api=conductor_api)

    def deallocate_port_for_instance(self, context, instance, port_id,
                                     conductor_api=None):
        """Delete one port and return the instance's refreshed nw info."""
        try:
            quantumv2.get_client(context).delete_port(port_id)
        except Exception as ex:
            # NOTE(review): message interpolation relies on the deprecated
            # locals() idiom; deletion failure is logged, not raised.
            LOG.exception(_("Failed to delete quantum port %(port_id)s ") %
                          locals())

        self.trigger_security_group_members_refresh(context, instance)
        self.trigger_instance_remove_security_group_refresh(context, instance)

        return self.get_instance_nw_info(context, instance,
                                         conductor_api=conductor_api)
    def list_ports(self, context, **search_opts):
        """Pass-through to quantum's port listing with arbitrary filters."""
        return quantumv2.get_client(context).list_ports(**search_opts)

    def show_port(self, context, port_id):
        """Return the details of a single port."""
        return quantumv2.get_client(context).show_port(port_id)

    def get_instance_nw_info(self, context, instance, networks=None,
                             conductor_api=None):
        """Build the instance's network info model and cache it."""
        result = self._get_instance_nw_info(context, instance, networks)
        # Persist the freshly built info into the instance info cache.
        update_instance_info_cache(self, context, instance, result,
                                   conductor_api)
        return result

    def _get_instance_nw_info(self, context, instance, networks=None):
        # Build the raw model and hydrate it into NetworkInfo objects.
        LOG.debug(_('get_instance_nw_info() for %s'),
                  instance['display_name'])
        nw_info = self._build_network_info_model(context, instance, networks)
        return network_model.NetworkInfo.hydrate(nw_info)
    def add_fixed_ip_to_instance(self, context, instance, network_id,
                                 conductor_api=None):
        """Add a fixed ip to the instance from specified network."""
        search_opts = {'network_id': network_id}
        data = quantumv2.get_client(context).list_subnets(**search_opts)
        ipam_subnets = data.get('subnets', [])
        if not ipam_subnets:
            raise exception.NetworkNotFoundForInstance(
                instance_id=instance['uuid'])

        zone = 'compute:%s' % instance['availability_zone']
        search_opts = {'device_id': instance['uuid'],
                       'device_owner': zone,
                       'network_id': network_id}
        data = quantumv2.get_client(context).list_ports(**search_opts)
        ports = data['ports']
        # Try each (port, subnet) pair until one update succeeds; the
        # first successful update returns immediately.
        for p in ports:
            for subnet in ipam_subnets:
                # NOTE(review): the update sends only the new subnet
                # allocation as 'fixed_ips' — confirm against the quantum
                # API whether this appends or replaces existing fixed ips.
                fixed_ips = [{'subnet_id': subnet['id']}]
                port_req_body = {'port': {'fixed_ips': fixed_ips}}
                try:
                    quantumv2.get_client(context).update_port(p['id'],
                                                              port_req_body)
                    return
                except Exception as ex:
                    msg = _("Unable to update port %(portid)s on subnet "
                            "%(subnet_id)s with failure: %(exception)s")
                    LOG.debug(msg, {'portid': p['id'],
                                    'subnet_id': subnet['id'],
                                    'exception': ex})

        # No port could be updated on any subnet of the network.
        raise exception.NetworkNotFoundForInstance(
            instance_id=instance['uuid'])
    def remove_fixed_ip_from_instance(self, context, instance, address,
                                      conductor_api=None):
        """Remove a fixed ip from the instance."""
        zone = 'compute:%s' % instance['availability_zone']
        search_opts = {'device_id': instance['uuid'],
                       'device_owner': zone,
                       'fixed_ips': 'ip_address=%s' % address}
        data = quantumv2.get_client(context).list_ports(**search_opts)
        ports = data['ports']
        for p in ports:
            # Keep every fixed ip except the one being removed.
            fixed_ips = p['fixed_ips']
            new_fixed_ips = []
            for fixed_ip in fixed_ips:
                if fixed_ip['ip_address'] != address:
                    new_fixed_ips.append(fixed_ip)
            port_req_body = {'port': {'fixed_ips': new_fixed_ips}}
            try:
                quantumv2.get_client(context).update_port(p['id'],
                                                          port_req_body)
            except Exception as ex:
                # Update failure is logged, not raised.
                msg = _("Unable to update port %(portid)s with"
                        " failure: %(exception)s")
                LOG.debug(msg, {'portid': p['id'], 'exception': ex})
            # Only the first matching port is processed.
            return

        raise exception.FixedIpNotFoundForSpecificInstance(
            instance_uuid=instance['uuid'], ip=address)
def validate_networks(self, context, requested_networks):
"""Validate that the tenant can use the requested networks."""
LOG.debug(_('validate_networks() for %s'),
requested_networks)
if not requested_networks:
return
net_ids = []
for (net_id, _i, port_id) in requested_networks:
if not port_id:
net_ids.append(net_id)
continue
port = quantumv2.get_client(context).show_port(port_id).get('port')
if not port:
raise exception.PortNotFound(port_id=port_id)
if port.get('device_id', None):
raise exception.PortInUse(port_id=port_id)
net_id = port['network_id']
if net_id in net_ids:
raise exception.NetworkDuplicated(network_id=net_id)
net_ids.append(net_id)
nets = self._get_available_networks(context, context.project_id,
net_ids)
if len(nets) != len(net_ids):
requsted_netid_set = set(net_ids)
returned_netid_set = set([net['id'] for net in nets])
lostid_set = requsted_netid_set - returned_netid_set
id_str = ''
for _id in lostid_set:
id_str = id_str and id_str + ', ' + _id or _id
raise exception.NetworkNotFound(network_id=id_str)
    def _get_instance_uuids_by_ip(self, context, address):
        """Retrieve instance uuids associated with the given ip address.

        :returns: A list of dicts containing the uuids keyed by
                  'instance_uuid', e.g. [{'instance_uuid': uuid}, ...]
        """
        search_opts = {"fixed_ips": 'ip_address=%s' % address}
        data = quantumv2.get_client(context).list_ports(**search_opts)
        ports = data.get('ports', [])
        # Ports without a device_id are not bound to an instance; skip them.
        return [{'instance_uuid': port['device_id']} for port in ports
                if port['device_id']]

    def get_instance_uuids_by_ip_filter(self, context, filters):
        """Return a list of dicts in the form of
        [{'instance_uuid': uuid}] that matched the ip filter.
        """
        # filters['ip'] is composed as '^%s$' % fixed_ip.replace('.', '\\.')
        ip = filters.get('ip')
        # Strip the regex anchors and the escaping backslashes to recover
        # the plain dotted address.
        if ip[0] == '^':
            ip = ip[1:]
        if ip[-1] == '$':
            ip = ip[:-1]
        ip = ip.replace('\\.', '.')
        return self._get_instance_uuids_by_ip(context, ip)
    def trigger_instance_add_security_group_refresh(self, context,
                                                    instance_ref):
        """Fire the 'instance_add_security_group' handler per group."""
        # NOTE(review): admin_context is computed but never used here; the
        # handler is invoked with the caller's context — confirm intended.
        admin_context = context.elevated()
        for group in instance_ref['security_groups']:
            self.conductor_api.security_groups_trigger_handler(context,
                'instance_add_security_group', instance_ref, group['name'])

    def trigger_instance_remove_security_group_refresh(self, context,
                                                       instance_ref):
        """Fire the 'instance_remove_security_group' handler per group."""
        # NOTE(review): admin_context unused, as above.
        admin_context = context.elevated()
        for group in instance_ref['security_groups']:
            self.conductor_api.security_groups_trigger_handler(context,
                'instance_remove_security_group', instance_ref, group['name'])

    def trigger_security_group_members_refresh(self, context, instance_ref):
        """Refresh member lists for every group the instance belongs to."""
        admin_context = context.elevated()
        group_ids = [group['id'] for group in instance_ref['security_groups']]
        self.conductor_api.security_groups_trigger_members_refresh(
            admin_context, group_ids)
        self.conductor_api.security_groups_trigger_handler(admin_context,
            'security_group_members', group_ids)
    def _get_port_id_by_fixed_address(self, client,
                                      instance, address):
        """Return the id of this instance's port that owns *address*.

        :raises FixedIpNotFoundForAddress: if no port has the address.
        """
        zone = 'compute:%s' % instance['availability_zone']
        search_opts = {'device_id': instance['uuid'],
                       'device_owner': zone}
        data = client.list_ports(**search_opts)
        ports = data['ports']
        port_id = None
        for p in ports:
            for ip in p['fixed_ips']:
                if ip['ip_address'] == address:
                    port_id = p['id']
                    # NOTE(review): this breaks only the inner loop, so a
                    # later port with the same address would overwrite
                    # port_id — confirm last-match-wins is intended.
                    break
        if not port_id:
            raise exception.FixedIpNotFoundForAddress(address=address)
        return port_id
    @refresh_cache
    def associate_floating_ip(self, context, instance,
                              floating_address, fixed_address,
                              affect_auto_assigned=False):
        """Associate a floating ip with a fixed ip."""
        # Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.
        client = quantumv2.get_client(context)
        port_id = self._get_port_id_by_fixed_address(client, instance,
                                                     fixed_address)
        fip = self._get_floating_ip_by_address(client, floating_address)
        # Pointing the floatingip at the port performs the association.
        param = {'port_id': port_id,
                 'fixed_ip_address': fixed_address}
        client.update_floatingip(fip['id'], {'floatingip': param})
def get_all(self, context):
client = quantumv2.get_client(context)
networks = client.list_networks().get('networks') or {}
for network in networks:
network['label'] = network['name']
return networks
def get(self, context, network_uuid):
client = quantumv2.get_client(context)
network = client.show_network(network_uuid).get('network') or {}
network['label'] = network['name']
return network
    def delete(self, context, network_uuid):
        """Not supported through nova when quantum manages networks."""
        raise NotImplementedError()

    def disassociate(self, context, network_uuid):
        """Not supported through nova when quantum manages networks."""
        raise NotImplementedError()

    def get_fixed_ip(self, context, id):
        """Not supported; quantum v2 has no standalone fixed-ip objects."""
        raise NotImplementedError()

    def get_fixed_ip_by_address(self, context, address):
        """Return the single instance-uuid mapping for a fixed address.

        :raises FixedIpNotFoundForAddress: no port has the address.
        :raises FixedIpAssociatedWithMultipleInstances: ambiguous address.
        """
        uuid_maps = self._get_instance_uuids_by_ip(context, address)
        if len(uuid_maps) == 1:
            return uuid_maps[0]
        elif not uuid_maps:
            raise exception.FixedIpNotFoundForAddress(address=address)
        else:
            raise exception.FixedIpAssociatedWithMultipleInstances(
                address=address)
def _setup_net_dict(self, client, network_id):
if not network_id:
return {}
pool = client.show_network(network_id)['network']
return {pool['id']: pool}
def _setup_port_dict(self, client, port_id):
if not port_id:
return {}
port = client.show_port(port_id)['port']
return {port['id']: port}
def _setup_pools_dict(self, client):
pools = self._get_floating_ip_pools(client)
return dict([(i['id'], i) for i in pools])
def _setup_ports_dict(self, client, project_id=None):
search_opts = {'tenant_id': project_id} if project_id else {}
ports = client.list_ports(**search_opts)['ports']
return dict([(p['id'], p) for p in ports])
    def get_floating_ip(self, context, id):
        """Return the nova-model floating ip identified by quantum id."""
        client = quantumv2.get_client(context)
        fip = client.show_floatingip(id)['floatingip']
        # Resolve the pool network and associated port for formatting.
        pool_dict = self._setup_net_dict(client,
                                         fip['floating_network_id'])
        port_dict = self._setup_port_dict(client, fip['port_id'])
        return self._format_floating_ip_model(fip, pool_dict, port_dict)
def _get_floating_ip_pools(self, client, project_id=None):
search_opts = {NET_EXTERNAL: True}
if project_id:
search_opts.update({'tenant_id': project_id})
data = client.list_networks(**search_opts)
return data['networks']
def get_floating_ip_pools(self, context):
client = quantumv2.get_client(context)
pools = self._get_floating_ip_pools(client)
return [{'name': n['name'] or n['id']} for n in pools]
    def _format_floating_ip_model(self, fip, pool_dict, port_dict):
        """Convert a quantum floatingip into nova's floating-ip dict shape.

        :param fip: floatingip dict as returned by quantum.
        :param pool_dict: external networks keyed by network id.
        :param port_dict: ports keyed by id (to resolve the instance uuid).
        """
        pool = pool_dict[fip['floating_network_id']]
        result = {'id': fip['id'],
                  'address': fip['floating_ip_address'],
                  'pool': pool['name'] or pool['id'],
                  'project_id': fip['tenant_id'],
                  # In Quantum v2, an exact fixed_ip_id does not exist.
                  'fixed_ip_id': fip['port_id'],
                  }
        # In Quantum v2 API fixed_ip_address and instance uuid
        # (= device_id) are known here, so pass it as a result.
        result['fixed_ip'] = {'address': fip['fixed_ip_address']}
        if fip['port_id']:
            instance_uuid = port_dict[fip['port_id']]['device_id']
            result['instance'] = {'uuid': instance_uuid}
        else:
            result['instance'] = None
        return result
    def get_floating_ip_by_address(self, context, address):
        """Return the nova-model floating ip for the given address."""
        client = quantumv2.get_client(context)
        fip = self._get_floating_ip_by_address(client, address)
        pool_dict = self._setup_net_dict(client,
                                         fip['floating_network_id'])
        port_dict = self._setup_port_dict(client, fip['port_id'])
        return self._format_floating_ip_model(fip, pool_dict, port_dict)

    def get_floating_ips_by_project(self, context):
        """Return all of the caller project's floating ips, nova-modelled."""
        client = quantumv2.get_client(context)
        project_id = context.project_id
        fips = client.list_floatingips(tenant_id=project_id)['floatingips']
        # Pre-fetch pools and ports once so formatting is O(1) per fip.
        pool_dict = self._setup_pools_dict(client)
        port_dict = self._setup_ports_dict(client, project_id)
        return [self._format_floating_ip_model(fip, pool_dict, port_dict)
                for fip in fips]
    def get_floating_ips_by_fixed_address(self, context, fixed_address):
        """Not tracked by this driver; always returns an empty list."""
        return []

    def get_instance_id_by_floating_address(self, context, address):
        """Returns the instance id a floating ip's fixed ip is allocated to."""
        client = quantumv2.get_client(context)
        fip = self._get_floating_ip_by_address(client, address)
        if not fip['port_id']:
            # Floating ip exists but is not associated with any port.
            return None
        port = client.show_port(fip['port_id'])['port']
        return port['device_id']
    def get_vifs_by_instance(self, context, instance):
        """Not implemented for the quantum v2 driver."""
        raise NotImplementedError()

    def get_vif_by_mac_address(self, context, mac_address):
        """Not implemented for the quantum v2 driver."""
        raise NotImplementedError()
def _get_floating_ip_pool_id_by_name_or_id(self, client, name_or_id):
search_opts = {NET_EXTERNAL: True, 'fields': 'id'}
if uuidutils.is_uuid_like(name_or_id):
search_opts.update({'id': name_or_id})
else:
search_opts.update({'name': name_or_id})
data = client.list_networks(**search_opts)
nets = data['networks']
if len(nets) == 1:
return nets[0]['id']
elif len(nets) == 0:
raise exception.FloatingIpPoolNotFound()
else:
msg = (_("Multiple floating IP pools matches found for name '%s'")
% name_or_id)
raise exception.NovaException(message=msg)
    def allocate_floating_ip(self, context, pool=None):
        """Add a floating ip to a project from a pool."""
        client = quantumv2.get_client(context)
        # Fall back to the configured default pool when none is given.
        pool = pool or CONF.default_floating_pool
        pool_id = self._get_floating_ip_pool_id_by_name_or_id(client, pool)

        # TODO(amotoki): handle exception during create_floatingip()
        # At this timing it is ensured that a network for pool exists.
        # quota error may be returned.
        param = {'floatingip': {'floating_network_id': pool_id}}
        fip = client.create_floatingip(param)
        return fip['floatingip']['floating_ip_address']
def _get_floating_ip_by_address(self, client, address):
"""Get floatingip from floating ip address."""
data = client.list_floatingips(floating_ip_address=address)
fips = data['floatingips']
if len(fips) == 0:
raise exception.FloatingIpNotFoundForAddress(address=address)
elif len(fips) > 1:
raise exception.FloatingIpMultipleFoundForAddress(address=address)
return fips[0]
    def release_floating_ip(self, context, address,
                            affect_auto_assigned=False):
        """Remove a floating ip with the given address from a project."""
        # Note(amotoki): We cannot handle a case where multiple pools
        # have overlapping IP address range. In this case we cannot use
        # 'address' as a unique key.
        # This is a limitation of the current nova.

        # Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.

        client = quantumv2.get_client(context)
        fip = self._get_floating_ip_by_address(client, address)
        if fip['port_id']:
            # Refuse to delete while still associated with a port.
            raise exception.FloatingIpAssociated(address=address)
        client.delete_floatingip(fip['id'])

    @refresh_cache
    def disassociate_floating_ip(self, context, instance, address,
                                 affect_auto_assigned=False):
        """Disassociate a floating ip from the instance."""
        # Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.
        client = quantumv2.get_client(context)
        fip = self._get_floating_ip_by_address(client, address)
        # Clearing port_id detaches the floating ip from its fixed ip.
        client.update_floatingip(fip['id'], {'floatingip': {'port_id': None}})
    def migrate_instance_start(self, context, instance, migration):
        """Start to migrate the network of an instance."""
        # NOTE(wenjianhn): just pass so instance migration doesn't
        # raise for now.
        pass

    def migrate_instance_finish(self, context, instance, migration):
        """Finish migrating the network of an instance."""
        # NOTE(wenjianhn): just pass so instance migration doesn't
        # raise for now.
        pass

    def add_network_to_project(self, context, project_id, network_uuid=None):
        """Force add a network to the project.

        Not supported by the quantum v2 driver.
        """
        raise NotImplementedError()
    def _build_network_info_model(self, context, instance, networks=None):
        """Build the list of VIF model objects for the instance's ports.

        Queries quantum (as admin) for the instance's ports, matches each
        port to its network and subnets, and assembles a NetworkInfo of
        network_model.VIF entries.
        """
        search_opts = {'tenant_id': instance['project_id'],
                       'device_id': instance['uuid'], }
        data = quantumv2.get_client(context,
                                    admin=True).list_ports(**search_opts)
        ports = data.get('ports', [])
        if networks is None:
            networks = self._get_available_networks(context,
                                                    instance['project_id'])
        else:
            # ensure ports are in preferred network order
            _ensure_requested_network_ordering(
                lambda x: x['network_id'],
                ports,
                [n['id'] for n in networks])

        nw_info = network_model.NetworkInfo()
        for port in ports:
            # Find the network this port belongs to.
            network_name = None
            for net in networks:
                if port['network_id'] == net['id']:
                    network_name = net['name']
                    break

            if network_name is None:
                raise exception.NotFound(_('Network %(net)s for '
                                           'port %(port_id)s not found!') %
                                         {'net': port['network_id'],
                                          'port': port['id']})

            network_IPs = [network_model.FixedIP(address=ip_address)
                           for ip_address in [ip['ip_address']
                                              for ip in port['fixed_ips']]]
            # TODO(gongysh) get floating_ips for each fixed_ip

            subnets = self._get_subnets_from_port(context, port)
            # Attach each fixed ip to the subnet that contains it.
            for subnet in subnets:
                subnet['ips'] = [fixed_ip for fixed_ip in network_IPs
                                 if fixed_ip.is_in_subnet(subnet)]

            bridge = None
            ovs_interfaceid = None
            # Network model metadata
            should_create_bridge = None
            vif_type = port.get('binding:vif_type')
            # TODO(berrange) Quantum should pass the bridge name
            # in another binding metadata field
            if vif_type == network_model.VIF_TYPE_OVS:
                bridge = CONF.quantum_ovs_bridge
                ovs_interfaceid = port['id']
            elif vif_type == network_model.VIF_TYPE_BRIDGE:
                bridge = "brq" + port['network_id']
                should_create_bridge = True

            if bridge is not None:
                bridge = bridge[:network_model.NIC_NAME_LEN]

            devname = "tap" + port['id']
            devname = devname[:network_model.NIC_NAME_LEN]

            # NOTE(review): 'net' is the loop variable left at the matched
            # network (guaranteed by the break above).
            network = network_model.Network(
                id=port['network_id'],
                bridge=bridge,
                injected=CONF.flat_injected,
                label=network_name,
                tenant_id=net['tenant_id']
            )
            network['subnets'] = subnets
            if should_create_bridge is not None:
                network['should_create_bridge'] = should_create_bridge
            nw_info.append(network_model.VIF(
                id=port['id'],
                address=port['mac_address'],
                network=network,
                type=port.get('binding:vif_type'),
                ovs_interfaceid=ovs_interfaceid,
                devname=devname))
        return nw_info
    def _get_subnets_from_port(self, context, port):
        """Return the subnets for a given port."""

        fixed_ips = port['fixed_ips']
        # No fixed_ips for the port means there is no subnet associated
        # with the network the port is created on.
        # Since list_subnets(id=[]) returns all subnets visible for the
        # current tenant, returned subnets may contain subnets which is not
        # related to the port. To avoid this, the method returns here.
        if not fixed_ips:
            return []
        search_opts = {'id': [ip['subnet_id'] for ip in fixed_ips]}
        data = quantumv2.get_client(context).list_subnets(**search_opts)
        ipam_subnets = data.get('subnets', [])
        subnets = []

        for subnet in ipam_subnets:
            subnet_dict = {'cidr': subnet['cidr'],
                           'gateway': network_model.IP(
                               address=subnet['gateway_ip'],
                               type='gateway'),
                           }

            # attempt to populate DHCP server field by finding the
            # network:dhcp port on this subnet's network.
            search_opts = {'network_id': subnet['network_id'],
                           'device_owner': 'network:dhcp'}
            data = quantumv2.get_client(context).list_ports(**search_opts)
            dhcp_ports = data.get('ports', [])
            for p in dhcp_ports:
                for ip_pair in p['fixed_ips']:
                    if ip_pair['subnet_id'] == subnet['id']:
                        subnet_dict['dhcp_server'] = ip_pair['ip_address']
                        break

            subnet_object = network_model.Subnet(**subnet_dict)
            for dns in subnet.get('dns_nameservers', []):
                subnet_object.add_dns(
                    network_model.IP(address=dns, type='dns'))

            # TODO(gongysh) get the routes for this subnet
            subnets.append(subnet_object)
        return subnets
def get_dns_domains(self, context):
"""Return a list of available dns domains.
These can be used to create DNS entries for floating ips.
"""
raise NotImplementedError()
def add_dns_entry(self, context, address, name, dns_type, domain):
"""Create specified DNS entry for address."""
raise NotImplementedError()
def modify_dns_entry(self, context, name, address, domain):
"""Create specified DNS entry for address."""
raise NotImplementedError()
def delete_dns_entry(self, context, name, domain):
    """Delete the specified dns entry.

    Not implemented by this network API backend.
    """
    raise NotImplementedError()
def delete_dns_domain(self, context, domain):
    """Delete the specified dns domain.

    Not implemented by this network API backend.
    """
    raise NotImplementedError()
def get_dns_entries_by_address(self, context, address, domain):
    """Get entries for address and domain.

    Not implemented by this network API backend.
    """
    raise NotImplementedError()
def get_dns_entries_by_name(self, context, name, domain):
    """Get entries for name and domain.

    Not implemented by this network API backend.
    """
    raise NotImplementedError()
def create_private_dns_domain(self, context, domain, availability_zone):
    """Create a private DNS domain with nova availability zone.

    Not implemented by this network API backend.
    """
    raise NotImplementedError()
def create_public_dns_domain(self, context, domain, project=None):
    """Create a public DNS domain with optional nova project.

    Not implemented by this network API backend.
    """
    raise NotImplementedError()
def _ensure_requested_network_ordering(accessor, unordered, preferred):
    """Sort `unordered` in place to match the `preferred` ordering.

    `accessor` extracts the comparison key (e.g. a network id) from each
    element; elements are ranked by that key's position in `preferred`.
    A falsy `preferred` leaves the list untouched.
    """
    if not preferred:
        return
    unordered.sort(key=lambda item: preferred.index(accessor(item)))
| |
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
from .service import Service
from .container import Container
from .packages.docker.errors import APIError
log = logging.getLogger(__name__)
def sort_service_dicts(services):
    """Order service dicts so that every service appears after the
    services it depends on via 'links' or 'volumes_from'.

    Depth-first topological sort (Cormen/Tarjan algorithm).  Raises
    DependencyError for self-references and circular dependencies.
    """
    def link_targets(links):
        # A link entry is "service" or "service:alias"; keep the service part.
        return [entry.split(':')[0] for entry in links]

    remaining = services[:]
    in_progress = set()
    ordered = []

    def visit(svc):
        name = svc['name']
        if name in in_progress:
            # We came back to a node currently being expanded: a cycle.
            if name in link_targets(svc.get('links', [])):
                raise DependencyError('A service can not link to itself: %s' % name)
            if name in svc.get('volumes_from', []):
                raise DependencyError('A service can not mount itself as volume: %s' % name)
            raise DependencyError('Circular import between %s' % ' and '.join(in_progress))
        if svc not in remaining:
            return
        in_progress.add(name)
        # Visit every service that depends on this one first, so that this
        # one ends up before them after the insert below.
        for other in services:
            if (name in link_targets(other.get('links', []))
                    or name in other.get('volumes_from', [])):
                visit(other)
        in_progress.remove(name)
        remaining.remove(svc)
        ordered.insert(0, svc)

    while remaining:
        visit(remaining[-1])
    return ordered
class Project(object):
    """
    A collection of services.
    """
    def __init__(self, name, services, client):
        self.name = name
        self.services = services
        self.client = client

    @classmethod
    def from_dicts(cls, name, service_dicts, client):
        """
        Construct a ServiceCollection from a list of dicts representing services.
        """
        project = cls(name, [], client)
        # Sort first so that link/volumes_from targets are already registered
        # by the time each dependent service is constructed.
        for service_dict in sort_service_dicts(service_dicts):
            links = project.get_links(service_dict)
            volumes_from = project.get_volumes_from(service_dict)
            project.services.append(
                Service(client=client, project=name, links=links,
                        volumes_from=volumes_from, **service_dict))
        return project

    @classmethod
    def from_config(cls, name, config, client):
        """Build a Project from a parsed fig.yml mapping of name -> options.

        Raises ConfigurationError if a top-level value is not a dict.
        """
        dicts = []
        for service_name, service in list(config.items()):
            if not isinstance(service, dict):
                # BUG FIX: service_name was never interpolated, so users saw
                # a literal "%s" in the error message.
                raise ConfigurationError(
                    'Service "%s" doesn\'t have any configuration options. '
                    'All top level keys in your fig.yml must map to a '
                    'dictionary of configuration options.' % service_name)
            service['name'] = service_name
            dicts.append(service)
        return cls.from_dicts(name, dicts, client)

    def get_service(self, name):
        """
        Retrieve a service by name. Raises NoSuchService
        if the named service does not exist.
        """
        for service in self.services:
            if service.name == name:
                return service
        raise NoSuchService(name)

    def get_services(self, service_names=None, include_links=False):
        """
        Returns a list of this project's services filtered
        by the provided list of names, or all services if service_names is None
        or [].

        If include_links is specified, returns a list including the links for
        service_names, in order of dependency.

        Preserves the original order of self.services where possible,
        reordering as needed to resolve links.

        Raises NoSuchService if any of the named services do not exist.
        """
        if not service_names:
            return self.get_services(
                service_names=[s.name for s in self.services],
                include_links=include_links
            )
        unsorted = [self.get_service(name) for name in service_names]
        services = [s for s in self.services if s in unsorted]

        if include_links:
            # functools.reduce works on both Python 2.6+ and Python 3;
            # the bare builtin `reduce` does not exist on Python 3.
            from functools import reduce
            services = reduce(self._inject_links, services, [])

        # De-duplicate while preserving order.
        uniques = []
        for service in services:
            if service not in uniques:
                uniques.append(service)
        return uniques

    def get_links(self, service_dict):
        """Resolve 'links' entries of service_dict to (Service, alias) pairs.

        Removes the 'links' key from the dict as a side effect.  Raises
        ConfigurationError when a link target is not a known service.
        """
        links = []
        if 'links' in service_dict:
            for link in service_dict.get('links', []):
                if ':' in link:
                    service_name, link_name = link.split(':', 1)
                else:
                    service_name, link_name = link, None
                try:
                    links.append((self.get_service(service_name), link_name))
                except NoSuchService:
                    raise ConfigurationError('Service "%s" has a link to service "%s" which does not exist.' % (service_dict['name'], service_name))
            del service_dict['links']
        return links

    def get_volumes_from(self, service_dict):
        """Resolve 'volumes_from' entries to Service or Container objects.

        Removes the 'volumes_from' key from the dict as a side effect.
        Falls back to looking up a container by id when no service matches;
        raises ConfigurationError when neither exists.
        """
        volumes_from = []
        if 'volumes_from' in service_dict:
            for volume_name in service_dict.get('volumes_from', []):
                try:
                    service = self.get_service(volume_name)
                    volumes_from.append(service)
                except NoSuchService:
                    try:
                        container = Container.from_id(self.client, volume_name)
                        volumes_from.append(container)
                    except APIError:
                        raise ConfigurationError('Service "%s" mounts volumes from "%s", which is not the name of a service or container.' % (service_dict['name'], volume_name))
            del service_dict['volumes_from']
        return volumes_from

    def start(self, service_names=None, **options):
        """Start the named services (all by default)."""
        for service in self.get_services(service_names):
            service.start(**options)

    def stop(self, service_names=None, **options):
        """Stop the named services, in reverse of their start order."""
        for service in reversed(self.get_services(service_names)):
            service.stop(**options)

    def kill(self, service_names=None, **options):
        """Kill the named services, in reverse of their start order."""
        for service in reversed(self.get_services(service_names)):
            service.kill(**options)

    def build(self, service_names=None, no_cache=False):
        """Build every named service that can be built; services that use a
        pre-built image are skipped with a log message."""
        for service in self.get_services(service_names):
            if service.can_be_built():
                service.build(no_cache)
            else:
                log.info('%s uses an image, skipping' % service.name)

    def up(self, service_names=None, start_links=True, recreate=True):
        """Create and start containers for the named services.

        When recreate is true, existing containers are recreated; otherwise
        they are started (or created if missing).  Returns the list of
        running containers.
        """
        running_containers = []
        for service in self.get_services(service_names, include_links=start_links):
            if recreate:
                for (_, container) in service.recreate_containers():
                    running_containers.append(container)
            else:
                for container in service.start_or_create_containers():
                    running_containers.append(container)
        return running_containers

    def remove_stopped(self, service_names=None, **options):
        """Remove stopped containers for the named services."""
        for service in self.get_services(service_names):
            service.remove_stopped(**options)

    def containers(self, service_names=None, *args, **kwargs):
        """Return all containers belonging to the named services."""
        result = []
        for service in self.get_services(service_names):
            result.extend(service.containers(*args, **kwargs))
        return result

    def _inject_links(self, acc, service):
        """reduce() helper: prepend a service's (transitive) link targets
        before the service itself."""
        linked_names = service.get_linked_names()
        if len(linked_names) > 0:
            linked_services = self.get_services(
                service_names=linked_names,
                include_links=True
            )
        else:
            linked_services = []
        linked_services.append(service)
        return acc + linked_services
class NoSuchService(Exception):
    """Raised when a service name cannot be resolved within a project."""

    def __init__(self, name):
        self.name = name
        self.msg = "No such service: %s" % name

    def __str__(self):
        return self.msg
class ConfigurationError(Exception):
    """Raised for invalid fig.yml contents (bad options, unknown link or
    volumes_from targets)."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg
class DependencyError(ConfigurationError):
    """Raised by sort_service_dicts for self-referencing or circular
    service dependencies (links / volumes_from)."""
    pass
| |
#fontmetrics.py - part of PDFgen - copyright Andy Robinson 1999
"""This contains pre-canned text metrics for the PDFgen package, and may also
be used for any other PIDDLE back ends or packages which use the standard
Type 1 postscript fonts.
Its main function is to let you work out the width of strings; it exposes a
single function, stringwidth(text, fontname), which works out the width of a
string in the given font. This is an integer defined in em-square units - each
character is defined in a 1000 x 1000 box called the em-square - for a 1-point high
character. So to convert to points, multiply by the point size and divide by 1000.
The AFM loading stuff worked for me but is not being heavily tested, as pre-canning
the widths for the standard 14 fonts in Acrobat Reader is so much more useful. One
could easily extend it to get the exact bounding box for each character, useful for
kerning.
The ascent_descent attribute of the module is a dictionary mapping font names
(with the proper Postscript capitalisation) to ascents and descents. I ought
to sort out the fontname case issue and the resolution of PIDDLE fonts to
Postscript font names within this module, but have not yet done so.
13th June 1999
"""
import os
# Proper Postscript names of the 14 standard Type 1 fonts for which width
# metrics are pre-canned in the `widths` table below.
StandardEnglishFonts = [
    'Courier', 'Courier-Bold', 'Courier-Oblique', 'Courier-BoldOblique', 'Helvetica',
    'Helvetica-Bold', 'Helvetica-Oblique', 'Helvetica-BoldOblique', 'Times-Roman', 'Times-Bold',
    'Times-Italic', 'Times-BoldItalic', 'Symbol', 'ZapfDingbats'
]
##############################################################
#
# PDF Metrics
# This is a preamble to give us a stringWidth function.
# loads and caches AFM files, but won't need to as the
# standard fonts are there already
##############################################################
widths = {
'courier':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 0, 600, 600, 600,
600, 0, 600, 600, 600, 600, 600, 600, 600, 600, 0, 600, 0, 600, 600, 600, 600, 600, 600, 600,
600, 0, 600, 600, 0, 600, 600, 600, 600, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 600, 0,
600, 0, 0, 0, 0, 600, 600, 600, 600, 0, 0, 0, 0, 0, 600, 0, 0, 0, 600, 0, 0, 600, 600, 600, 600,
0, 0, 600],
'courier-bold':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 0, 600, 600, 600,
600, 0, 600, 600, 600, 600, 600, 600, 600, 600, 0, 600, 0, 600, 600, 600, 600, 600, 600, 600,
600, 0, 600, 600, 0, 600, 600, 600, 600, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 600, 0,
600, 0, 0, 0, 0, 600, 600, 600, 600, 0, 0, 0, 0, 0, 600, 0, 0, 0, 600, 0, 0, 600, 600, 600, 600,
0, 0, 600],
'courier-boldoblique':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 0, 600, 600, 600,
600, 0, 600, 600, 600, 600, 600, 600, 600, 600, 0, 600, 0, 600, 600, 600, 600, 600, 600, 600,
600, 0, 600, 600, 0, 600, 600, 600, 600, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 600, 0,
600, 0, 0, 0, 0, 600, 600, 600, 600, 0, 0, 0, 0, 0, 600, 0, 0, 0, 600, 0, 0, 600, 600, 600, 600,
0, 0, 600],
'courier-oblique':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 0, 600, 600, 600,
600, 0, 600, 600, 600, 600, 600, 600, 600, 600, 0, 600, 0, 600, 600, 600, 600, 600, 600, 600,
600, 0, 600, 600, 0, 600, 600, 600, 600, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 600, 0,
600, 0, 0, 0, 0, 600, 600, 600, 600, 0, 0, 0, 0, 0, 600, 0, 0, 0, 600, 0, 0, 600, 600, 600, 600,
0, 0, 600],
'helvetica':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
278, 278, 355, 556, 556, 889, 667, 222, 333, 333, 389, 584, 278, 333, 278, 278, 556, 556, 556,
556, 556, 556, 556, 556, 556, 556, 278, 278, 584, 584, 584, 556, 1015, 667, 667, 722, 722, 667,
611, 778, 722, 278, 500, 667, 556, 833, 722, 778, 667, 778, 722, 667, 611, 722, 667, 944, 667,
667, 611, 278, 278, 278, 469, 556, 222, 556, 556, 500, 556, 556, 278, 556, 556, 222, 222, 500,
222, 833, 556, 556, 556, 556, 333, 500, 278, 556, 500, 722, 500, 500, 500, 334, 260, 334, 584, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 333, 556, 556, 167, 556, 556, 556, 556, 191, 333, 556, 333, 333, 500, 500, 0, 556, 556, 556,
278, 0, 537, 350, 222, 333, 333, 556, 1000, 1000, 0, 611, 0, 333, 333, 333, 333, 333, 333, 333,
333, 0, 333, 333, 0, 333, 333, 333, 1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1000,
0, 370, 0, 0, 0, 0, 556, 778, 1000, 365, 0, 0, 0, 0, 0, 889, 0, 0, 0, 278, 0, 0, 222, 611, 944,
611, 0, 0, 834],
'helvetica-bold':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
278, 333, 474, 556, 556, 889, 722, 278, 333, 333, 389, 584, 278, 333, 278, 278, 556, 556, 556,
556, 556, 556, 556, 556, 556, 556, 333, 333, 584, 584, 584, 611, 975, 722, 722, 722, 722, 667,
611, 778, 722, 278, 556, 722, 611, 833, 722, 778, 667, 778, 722, 667, 611, 722, 667, 944, 667,
667, 611, 333, 278, 333, 584, 556, 278, 556, 611, 556, 611, 556, 333, 611, 611, 278, 278, 556,
278, 889, 611, 611, 611, 611, 389, 556, 333, 611, 556, 778, 556, 556, 500, 389, 280, 389, 584, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 333, 556, 556, 167, 556, 556, 556, 556, 238, 500, 556, 333, 333, 611, 611, 0, 556, 556, 556,
278, 0, 556, 350, 278, 500, 500, 556, 1000, 1000, 0, 611, 0, 333, 333, 333, 333, 333, 333, 333,
333, 0, 333, 333, 0, 333, 333, 333, 1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1000,
0, 370, 0, 0, 0, 0, 611, 778, 1000, 365, 0, 0, 0, 0, 0, 889, 0, 0, 0, 278, 0, 0, 278, 611, 944,
611, 0, 0, 834],
'helvetica-boldoblique':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
278, 333, 474, 556, 556, 889, 722, 278, 333, 333, 389, 584, 278, 333, 278, 278, 556, 556, 556,
556, 556, 556, 556, 556, 556, 556, 333, 333, 584, 584, 584, 611, 975, 722, 722, 722, 722, 667,
611, 778, 722, 278, 556, 722, 611, 833, 722, 778, 667, 778, 722, 667, 611, 722, 667, 944, 667,
667, 611, 333, 278, 333, 584, 556, 278, 556, 611, 556, 611, 556, 333, 611, 611, 278, 278, 556,
278, 889, 611, 611, 611, 611, 389, 556, 333, 611, 556, 778, 556, 556, 500, 389, 280, 389, 584, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 333, 556, 556, 167, 556, 556, 556, 556, 238, 500, 556, 333, 333, 611, 611, 0, 556, 556, 556,
278, 0, 556, 350, 278, 500, 500, 556, 1000, 1000, 0, 611, 0, 333, 333, 333, 333, 333, 333, 333,
333, 0, 333, 333, 0, 333, 333, 333, 1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1000,
0, 370, 0, 0, 0, 0, 611, 778, 1000, 365, 0, 0, 0, 0, 0, 889, 0, 0, 0, 278, 0, 0, 278, 611, 944,
611, 0, 0, 834],
'helvetica-oblique':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
278, 278, 355, 556, 556, 889, 667, 222, 333, 333, 389, 584, 278, 333, 278, 278, 556, 556, 556,
556, 556, 556, 556, 556, 556, 556, 278, 278, 584, 584, 584, 556, 1015, 667, 667, 722, 722, 667,
611, 778, 722, 278, 500, 667, 556, 833, 722, 778, 667, 778, 722, 667, 611, 722, 667, 944, 667,
667, 611, 278, 278, 278, 469, 556, 222, 556, 556, 500, 556, 556, 278, 556, 556, 222, 222, 500,
222, 833, 556, 556, 556, 556, 333, 500, 278, 556, 500, 722, 500, 500, 500, 334, 260, 334, 584, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 333, 556, 556, 167, 556, 556, 556, 556, 191, 333, 556, 333, 333, 500, 500, 0, 556, 556, 556,
278, 0, 537, 350, 222, 333, 333, 556, 1000, 1000, 0, 611, 0, 333, 333, 333, 333, 333, 333, 333,
333, 0, 333, 333, 0, 333, 333, 333, 1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1000,
0, 370, 0, 0, 0, 0, 556, 778, 1000, 365, 0, 0, 0, 0, 0, 889, 0, 0, 0, 278, 0, 0, 222, 611, 944,
611, 0, 0, 834],
'symbol':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
250, 333, 713, 500, 549, 833, 778, 439, 333, 333, 500, 549, 250, 549, 250, 278, 500, 500, 500,
500, 500, 500, 500, 500, 500, 500, 278, 278, 549, 549, 549, 444, 549, 722, 667, 722, 612, 611,
763, 603, 722, 333, 631, 722, 686, 889, 722, 722, 768, 741, 556, 592, 611, 690, 439, 768, 645,
795, 611, 333, 863, 333, 658, 500, 500, 631, 549, 549, 494, 439, 521, 411, 603, 329, 603, 549,
549, 576, 521, 549, 549, 521, 549, 603, 439, 576, 713, 686, 493, 686, 494, 480, 200, 480, 549, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 620, 247, 549, 167, 713, 500, 753, 753, 753, 753, 1042, 987, 603, 987, 603, 400, 549, 411,
549, 549, 713, 494, 460, 549, 549, 549, 549, 1000, 603, 1000, 658, 823, 686, 795, 987, 768, 768,
823, 768, 768, 713, 713, 713, 713, 713, 713, 713, 768, 713, 790, 790, 890, 823, 549, 250, 713,
603, 603, 1042, 987, 603, 987, 603, 494, 329, 790, 790, 786, 713, 384, 384, 384, 384, 384, 384,
494, 494, 494, 494, 0, 329, 274, 686, 686, 686, 384, 384, 384, 384, 384, 384, 494, 494, 790],
'times-bold':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
250, 333, 555, 500, 500, 1000, 833, 333, 333, 333, 500, 570, 250, 333, 250, 278, 500, 500, 500,
500, 500, 500, 500, 500, 500, 500, 333, 333, 570, 570, 570, 500, 930, 722, 667, 722, 722, 667,
611, 778, 778, 389, 500, 778, 667, 944, 722, 778, 611, 778, 722, 556, 667, 722, 722, 1000, 722,
722, 667, 333, 278, 333, 581, 500, 333, 500, 556, 444, 556, 444, 333, 500, 556, 278, 333, 556,
278, 833, 556, 500, 556, 556, 444, 389, 333, 556, 500, 722, 500, 500, 444, 394, 220, 394, 520, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 333, 500, 500, 167, 500, 500, 500, 500, 278, 500, 500, 333, 333, 556, 556, 0, 500, 500, 500,
250, 0, 540, 350, 333, 500, 500, 500, 1000, 1000, 0, 500, 0, 333, 333, 333, 333, 333, 333, 333,
333, 0, 333, 333, 0, 333, 333, 333, 1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1000,
0, 300, 0, 0, 0, 0, 667, 778, 1000, 330, 0, 0, 0, 0, 0, 722, 0, 0, 0, 278, 0, 0, 278, 500, 722,
556, 0, 0, 750],
'times-bolditalic': [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
250, 389, 555, 500, 500, 833, 778, 333, 333, 333, 500, 570, 250, 333, 250, 278, 500, 500, 500,
500, 500, 500, 500, 500, 500, 500, 333, 333, 570, 570, 570, 500, 832, 667, 667, 667, 722, 667,
667, 722, 778, 389, 500, 667, 611, 889, 722, 722, 611, 722, 667, 556, 611, 722, 667, 889, 667,
611, 611, 333, 278, 333, 570, 500, 333, 500, 500, 444, 500, 444, 333, 500, 556, 278, 278, 500,
278, 778, 556, 500, 500, 500, 389, 389, 278, 556, 444, 667, 500, 444, 389, 348, 220, 348, 570,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 389, 500, 500, 167, 500, 500, 500, 500, 278, 500, 500, 333, 333, 556, 556, 0, 500, 500,
500, 250, 0, 500, 350, 333, 500, 500, 500, 1000, 1000, 0, 500, 0, 333, 333, 333, 333, 333, 333,
333, 333, 0, 333, 333, 0, 333, 333, 333, 1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
944, 0, 266, 0, 0, 0, 0, 611, 722, 944, 300, 0, 0, 0, 0, 0, 722, 0, 0, 0, 278, 0, 0, 278, 500,
722, 500, 0, 0, 750
],
'times-italic':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
250, 333, 420, 500, 500, 833, 778, 333, 333, 333, 500, 675, 250, 333, 250, 278, 500, 500, 500,
500, 500, 500, 500, 500, 500, 500, 333, 333, 675, 675, 675, 500, 920, 611, 611, 667, 722, 611,
611, 722, 722, 333, 444, 667, 556, 833, 667, 722, 611, 722, 611, 500, 556, 722, 611, 833, 611,
556, 556, 389, 278, 389, 422, 500, 333, 500, 500, 444, 500, 444, 278, 500, 500, 278, 278, 444,
278, 722, 500, 500, 500, 500, 389, 389, 278, 500, 444, 667, 444, 444, 389, 400, 275, 400, 541, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 389, 500, 500, 167, 500, 500, 500, 500, 214, 556, 500, 333, 333, 500, 500, 0, 500, 500, 500,
250, 0, 523, 350, 333, 556, 556, 500, 889, 1000, 0, 500, 0, 333, 333, 333, 333, 333, 333, 333,
333, 0, 333, 333, 0, 333, 333, 333, 889, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 889, 0,
276, 0, 0, 0, 0, 556, 722, 944, 310, 0, 0, 0, 0, 0, 667, 0, 0, 0, 278, 0, 0, 278, 500, 667, 500,
0, 0, 750],
'times-roman': [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
250, 333, 408, 500, 500, 833, 778, 333, 333, 333, 500, 564, 250, 333, 250, 278, 500, 500, 500,
500, 500, 500, 500, 500, 500, 500, 278, 278, 564, 564, 564, 444, 921, 722, 667, 667, 722, 611,
556, 722, 722, 333, 389, 722, 611, 889, 722, 722, 556, 722, 667, 556, 611, 722, 722, 944, 722,
722, 611, 333, 278, 333, 469, 500, 333, 444, 500, 444, 500, 444, 333, 500, 500, 278, 278, 500,
278, 778, 500, 500, 500, 500, 333, 389, 278, 500, 500, 722, 500, 500, 444, 480, 200, 480, 541,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 333, 500, 500, 167, 500, 500, 500, 500, 180, 444, 500, 333, 333, 556, 556, 0, 500, 500,
500, 250, 0, 453, 350, 333, 444, 444, 500, 1000, 1000, 0, 444, 0, 333, 333, 333, 333, 333, 333,
333, 333, 0, 333, 333, 0, 333, 333, 333, 1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
889, 0, 276, 0, 0, 0, 0, 611, 722, 889, 310, 0, 0, 0, 0, 0, 667, 0, 0, 0, 278, 0, 0, 278, 500,
722, 500, 0, 0, 750
],
'zapfdingbats':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
278, 974, 961, 974, 980, 719, 789, 790, 791, 690, 960, 939, 549, 855, 911, 933, 911, 945, 974,
755, 846, 762, 761, 571, 677, 763, 760, 759, 754, 494, 552, 537, 577, 692, 786, 788, 788, 790,
793, 794, 816, 823, 789, 841, 823, 833, 816, 831, 923, 744, 723, 749, 790, 792, 695, 776, 768,
792, 759, 707, 708, 682, 701, 826, 815, 789, 789, 707, 687, 696, 689, 786, 787, 713, 791, 785,
791, 873, 761, 762, 762, 759, 759, 892, 892, 788, 784, 438, 138, 277, 415, 392, 392, 668, 668, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 732, 544, 544, 910, 667, 760, 760, 776, 595, 694, 626, 788, 788, 788, 788, 788, 788, 788, 788,
788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788,
788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 894, 838, 1016, 458, 748, 924,
748, 918, 927, 928, 928, 834, 873, 828, 924, 924, 917, 930, 931, 463, 883, 836, 836, 867, 867,
696, 696, 874, 0, 874, 760, 946, 771, 865, 771, 888, 967, 888, 831, 873, 927, 970, 234]
}
# (ascent, descent) pairs for each standard font, keyed by proper Postscript
# name (see the module docstring).  Symbol and ZapfDingbats carry (0, 0).
ascent_descent = {'Courier': (629, -157),
                  'Courier-Bold': (626, -142),
                  'Courier-BoldOblique': (626, -142),
                  'Courier-Oblique': (629, -157),
                  'Helvetica': (718, -207),
                  'Helvetica-Bold': (718, -207),
                  'Helvetica-BoldOblique': (718, -207),
                  'Helvetica-Oblique': (718, -207),
                  'Symbol': (0, 0),
                  'Times-Bold': (676, -205),
                  'Times-BoldItalic': (699, -205),
                  'Times-Italic': (683, -205),
                  'Times-Roman': (683, -217),
                  'ZapfDingbats': (0, 0)}
def parseAFMfile(filename):
    """Returns an array holding the widths of all characters in the font.
    Ultra-crude parser.

    Reads the AFM file at `filename`, keeps only the lines between
    StartCharMetrics and EndCharMetrics, and extracts the character code
    (C) and width (WX) from each.  Characters with no metric entry
    default to the width of a space (character 32).
    """
    # Use a context manager so the file handle is not leaked.
    with open(filename, 'r') as f:
        alllines = f.readlines()
    # get stuff between StartCharMetrics and EndCharMetrics
    metriclines = []
    between = 0
    for line in alllines:
        if 'endcharmetrics' in line.lower():
            between = 0
            break
        if between:
            metriclines.append(line)
        if 'startcharmetrics' in line.lower():
            between = 1
    # break up - very shaky assumption about array size
    widths = [0] * 255
    for line in metriclines:
        chunks = line.split(';')
        (c, cid) = chunks[0].split()
        (wx, width) = chunks[1].split()
        #(n, name) = string.split(chunks[2])
        #(b, x1, y1, x2, y2) = string.split(chunks[3])
        widths[int(cid)] = int(width)
    # by default, any empties should get the width of a space
    # BUG FIX: this previously used '==' (a no-op comparison) instead of
    # '=', so empty slots were never filled in.
    for i in range(len(widths)):
        if widths[i] == 0:
            widths[i] = widths[32]
    return widths
class FontCache:
    """Loads and caches font width information on demand. Font names
    converted to lower case for indexing. Public interface is stringwidth"""

    def __init__(self):
        # Seed the cache with the pre-canned metrics for the standard fonts
        # (the module-level `widths` table).
        self.__widtharrays = widths

    def loadfont(self, fontname):
        """Parse <AFMDIR>/<fontname>.afm and add its width table to the cache.

        NOTE(review): AFMDIR is not defined in this module — presumably it
        is expected to be supplied by the embedding application; confirm
        before relying on on-demand loading.
        """
        filename = AFMDIR + os.sep + fontname + '.afm'
        print('cache loading', filename)
        assert os.path.exists(filename)
        # Store directly rather than through a local named `widths`, which
        # previously shadowed the module-level table of the same name.
        self.__widtharrays[fontname] = parseAFMfile(filename)

    def getfont(self, fontname):
        """Return the width array for `fontname`, loading it on demand.

        Falls back to Courier metrics when the font cannot be found.
        """
        try:
            return self.__widtharrays[fontname]
        except KeyError:
            # Narrowed from a blanket `except Exception` so genuine bugs in
            # cache access are not silently turned into a load attempt.
            try:
                self.loadfont(fontname)
                return self.__widtharrays[fontname]
            except Exception:
                # font not found, use Courier
                print('Font', fontname, 'not found - using Courier for widths')
                return self.getfont('courier')

    def stringwidth(self, text, font):
        """Width of `text` in em-square units (thousandths of the point
        size) for the given font name (case-insensitive)."""
        widths = self.getfont(font.lower())
        return sum(widths[ord(char)] for char in text)

    def status(self):
        # Names of the fonts currently loaded in the cache.
        return self.__widtharrays.keys()
# Module-level singleton cache shared by all callers of stringwidth().
TheFontCache = FontCache()
#expose the singleton as a single function
stringwidth = TheFontCache.stringwidth
| |
from __future__ import absolute_import
from __future__ import print_function
from subprocess import *
import collections
import pprint
import logging
import errno
import os
import re
import sys
import tempfile
from .popenwrapper import Popen
# Absolute location of this driver module; its directory doubles as the
# driver install prefix.
fullSelfPath = os.path.realpath(__file__)
prefix = os.path.dirname(fullSelfPath)
driverDir = prefix

# Environmental variable for path to compiler tools (clang/llvm-link etc..)
llvmCompilerPathEnv = 'LLVM_COMPILER_PATH'

# This is the ELF section name inserted into binaries
elfSectionName='.llvm_bc'

# These are the MACH_O segment and section name
darwinSegmentName='__LLVM'
darwinSectionName='__llvm_bc'

# Internal logger
_logger = logging.getLogger(__name__)

# Flag for dumping
DUMPING = False
# This class applies filters to GCC argument lists. It has a few
# default arguments that it records, but does not modify the argument
# list at all. It can be subclassed to change this behavior.
#
# The idea is that all flags accepting a parameter must be specified
# so that they know to consume an extra token from the input stream.
# Flags and arguments can be recorded in any way desired by providing
# a callback. Each callback/flag has an arity specified - zero arity
# flags (such as -v) are provided to their callback as-is. Higher
# arities remove the appropriate number of arguments from the list and
# pass them to the callback with the flag.
#
# Most flags can be handled with a simple lookup in a table - these
# are exact matches. Other flags are more complex and can be
# recognized by regular expressions. All regular expressions must be
# tried, obviously. The first one that matches is taken, and no order
# is specified. Try to avoid overlapping patterns.
class ArgumentListFilter(object):
    """Partition a compiler command line into compile and link arguments.

    Zero-arity flags are handed to their callback as-is; higher arities
    pop the appropriate number of following arguments off the input list
    and pass them to the callback together with the flag.

    Most flags are handled by a cheap exact-match table lookup.  The rest
    are matched against regular expressions; the first pattern that
    matches wins and no particular order is guaranteed, so overlapping
    patterns should be avoided (more specific patterns are listed first).
    """

    def __init__(self, inputList, exactMatches=None, patternMatches=None):
        """Parse inputList, dispatching every argument to its callback.

        exactMatches / patternMatches let subclasses override entries in
        the default tables.  They default to None (not {}) so that no
        mutable default dict is ever shared between calls.
        """
        if exactMatches is None:
            exactMatches = {}
        if patternMatches is None:
            patternMatches = {}
        defaultArgExactMatches = {

            '-o' : (1, ArgumentListFilter.outputFileCallback),
            '-c' : (0, ArgumentListFilter.compileOnlyCallback),
            '-E' : (0, ArgumentListFilter.preprocessOnlyCallback),
            '-S' : (0, ArgumentListFilter.assembleOnlyCallback),
            '--verbose' : (0, ArgumentListFilter.verboseFlagCallback),
            '--param' : (1, ArgumentListFilter.defaultBinaryCallback),
            '-aux-info' : (1, ArgumentListFilter.defaultBinaryCallback),
            #iam: presumably the len(inputFiles) == 0 in this case
            '--version' : (0, ArgumentListFilter.compileOnlyCallback),
            '-v' : (0, ArgumentListFilter.compileOnlyCallback),
            #warnings (apart from the regex below)
            '-w' : (0, ArgumentListFilter.compileOnlyCallback),
            '-W' : (0, ArgumentListFilter.compileOnlyCallback),
            #iam: if this happens, then we need to stop and think.
            '-emit-llvm' : (0, ArgumentListFilter.abortUnaryCallback),
            #iam: buildworld and buildkernel use these flags
            '-pipe' : (0, ArgumentListFilter.compileUnaryCallback),
            '-undef' : (0, ArgumentListFilter.compileUnaryCallback),
            '-nostdinc' : (0, ArgumentListFilter.compileUnaryCallback),
            '-nostdinc++' : (0, ArgumentListFilter.compileUnaryCallback),
            '-Qunused-arguments' : (0, ArgumentListFilter.compileUnaryCallback),
            '-no-integrated-as' : (0, ArgumentListFilter.compileUnaryCallback),
            '-integrated-as' : (0, ArgumentListFilter.compileUnaryCallback),
            #iam: gcc uses this in both compile and link, but clang only in compile
            '-pthread' : (0, ArgumentListFilter.compileUnaryCallback),
            #iam: arm stuff
            '-mno-omit-leaf-frame-pointer' : (0, ArgumentListFilter.compileUnaryCallback),
            '-maes' : (0, ArgumentListFilter.compileUnaryCallback),
            '-mno-aes' : (0, ArgumentListFilter.compileUnaryCallback),
            '-mavx' : (0, ArgumentListFilter.compileUnaryCallback),
            '-mno-avx' : (0, ArgumentListFilter.compileUnaryCallback),
            '-mcmodel=kernel' : (0, ArgumentListFilter.compileUnaryCallback),
            '-mno-red-zone' : (0, ArgumentListFilter.compileUnaryCallback),
            '-mmmx' : (0, ArgumentListFilter.compileUnaryCallback),
            '-mno-mmx' : (0, ArgumentListFilter.compileUnaryCallback),
            '-msse' : (0, ArgumentListFilter.compileUnaryCallback),
            '-mno-sse2' : (0, ArgumentListFilter.compileUnaryCallback),
            '-msse2' : (0, ArgumentListFilter.compileUnaryCallback),
            '-mno-sse3' : (0, ArgumentListFilter.compileUnaryCallback),
            '-msse3' : (0, ArgumentListFilter.compileUnaryCallback),
            '-mno-sse' : (0, ArgumentListFilter.compileUnaryCallback),
            '-msoft-float' : (0, ArgumentListFilter.compileUnaryCallback),
            '-m3dnow' : (0, ArgumentListFilter.compileUnaryCallback),
            '-mno-3dnow' : (0, ArgumentListFilter.compileUnaryCallback),
            # Preprocessor assertion
            '-A' : (1, ArgumentListFilter.compileBinaryCallback),
            '-D' : (1, ArgumentListFilter.compileBinaryCallback),
            '-U' : (1, ArgumentListFilter.compileBinaryCallback),
            # Dependency generation
            '-M' : (0, ArgumentListFilter.dependencyOnlyCallback),
            '-MM' : (0, ArgumentListFilter.dependencyOnlyCallback),
            '-MF' : (1, ArgumentListFilter.dependencyBinaryCallback),
            '-MG' : (0, ArgumentListFilter.dependencyOnlyCallback),
            '-MP' : (0, ArgumentListFilter.dependencyOnlyCallback),
            '-MT' : (1, ArgumentListFilter.dependencyBinaryCallback),
            '-MQ' : (1, ArgumentListFilter.dependencyBinaryCallback),
            '-MD' : (0, ArgumentListFilter.dependencyOnlyCallback),
            '-MMD' : (0, ArgumentListFilter.dependencyOnlyCallback),
            # Include
            '-I' : (1, ArgumentListFilter.compileBinaryCallback),
            '-idirafter' : (1, ArgumentListFilter.compileBinaryCallback),
            '-include' : (1, ArgumentListFilter.compileBinaryCallback),
            '-imacros' : (1, ArgumentListFilter.compileBinaryCallback),
            '-iprefix' : (1, ArgumentListFilter.compileBinaryCallback),
            '-iwithprefix' : (1, ArgumentListFilter.compileBinaryCallback),
            '-iwithprefixbefore' : (1, ArgumentListFilter.compileBinaryCallback),
            '-isystem' : (1, ArgumentListFilter.compileBinaryCallback),
            '-isysroot' : (1, ArgumentListFilter.compileBinaryCallback),
            '-iquote' : (1, ArgumentListFilter.compileBinaryCallback),
            '-imultilib' : (1, ArgumentListFilter.compileBinaryCallback),
            # mllvm
            '-mllvm': (1, ArgumentListFilter.compileBinaryCallback),
            # Language
            '-ansi' : (0, ArgumentListFilter.compileUnaryCallback),
            '-x' : (1, ArgumentListFilter.compileBinaryCallback),
            # Debug
            '-g' : (0, ArgumentListFilter.compileUnaryCallback),
            '-g0' : (0, ArgumentListFilter.compileUnaryCallback), #iam: clang not gcc
            '-gdwarf-2' : (0, ArgumentListFilter.compileUnaryCallback),
            '-gdwarf-3' : (0, ArgumentListFilter.compileUnaryCallback),
            '-p' : (0, ArgumentListFilter.compileUnaryCallback),
            '-pg' : (0, ArgumentListFilter.compileUnaryCallback),
            # Optimization
            '-O' : (0, ArgumentListFilter.compileUnaryCallback),
            '-O0' : (0, ArgumentListFilter.compileUnaryCallback),
            '-O1' : (0, ArgumentListFilter.compileUnaryCallback),
            '-O2' : (0, ArgumentListFilter.compileUnaryCallback),
            '-O3' : (0, ArgumentListFilter.compileUnaryCallback),
            '-Os' : (0, ArgumentListFilter.compileUnaryCallback),
            '-Ofast' : (0, ArgumentListFilter.compileUnaryCallback),
            '-Og' : (0, ArgumentListFilter.compileUnaryCallback),
            # Component-specifiers
            '-Xclang' : (1, ArgumentListFilter.defaultBinaryCallback),
            '-Xpreprocessor' : (1, ArgumentListFilter.defaultBinaryCallback),
            '-Xassembler' : (1, ArgumentListFilter.defaultBinaryCallback),
            '-Xlinker' : (1, ArgumentListFilter.defaultBinaryCallback),
            # Linker
            '-l' : (1, ArgumentListFilter.linkBinaryCallback),
            '-L' : (1, ArgumentListFilter.linkBinaryCallback),
            '-T' : (1, ArgumentListFilter.linkBinaryCallback),
            '-u' : (1, ArgumentListFilter.linkBinaryCallback),
            #iam: specify the entry point
            '-e' : (1, ArgumentListFilter.linkBinaryCallback),
            # runtime library search path
            '-rpath' : (1, ArgumentListFilter.linkBinaryCallback),
            # iam: showed up in buildkernel
            '-shared' : (0, ArgumentListFilter.linkUnaryCallback),
            '-static' : (0, ArgumentListFilter.linkUnaryCallback),
            '-nostdlib' : (0, ArgumentListFilter.linkUnaryCallback),
            '-nodefaultlibs' : (0, ArgumentListFilter.linkUnaryCallback),
            '-rdynamic' : (0, ArgumentListFilter.linkUnaryCallback),
            # darwin flags
            '-dynamiclib' : (0, ArgumentListFilter.linkUnaryCallback),
            '-current_version' : (1, ArgumentListFilter.linkBinaryCallback),
            '-compatibility_version' : (1, ArgumentListFilter.linkBinaryCallback),
            # dragonegg mystery argument
            '--64' : (0, ArgumentListFilter.compileUnaryCallback),
            #
            # BD: need to warn the darwin user that these flags will rain on their parade
            # (the Darwin ld is a bit single minded)
            #
            # 1) compilation with -fvisibility=hidden causes trouble when we try to
            #    attach bitcode filenames to an object file. The global symbols in object
            #    files get turned into local symbols when we invoke 'ld -r'
            #
            # 2) all stripping commands (e.g., -dead_strip) remove the __LLVM segment
            #    after linking
            #
            # Update: found a fix for problem 1: add flag -keep_private_externs when
            # calling ld -r.
            #
            '-Wl,-dead_strip' : (0, ArgumentListFilter.darwinWarningLinkUnaryCallback),
        }

        #
        # Patterns for other command-line arguments:
        # - inputFiles
        # - objectFiles (suffix .o)
        # - libraries + linker options as in -lxxx -Lpath or -Wl,xxxx
        # - preprocessor options as in -DXXX -Ipath
        # - compiler warning options: -W....
        # - optimization and other flags: -f...
        #
        # NOTE: the -fsanitize pattern must precede the generic -f pattern;
        # dicts preserve insertion order, and with the old ordering the
        # -fsanitize entry could never match.
        defaultArgPatterns = {
            r'^.+\.(c|cc|cpp|C|cxx|i|s|S)$' : (0, ArgumentListFilter.inputFileCallback),
            #iam: the object file recognition is not really very robust, object files
            # should be determined by their existence and contents...
            r'^.+\.(o|lo|So|so|po|a)$' : (0, ArgumentListFilter.objectFileCallback),
            r'^-(l|L).+$' : (0, ArgumentListFilter.linkUnaryCallback),
            r'^-I.+$' : (0, ArgumentListFilter.compileUnaryCallback),
            r'^-D.+$' : (0, ArgumentListFilter.compileUnaryCallback),
            r'^-Wl,.+$' : (0, ArgumentListFilter.linkUnaryCallback),
            r'^-W(?!l,).*$' : (0, ArgumentListFilter.compileUnaryCallback),
            r'^-fsanitize=.+$' : (0, ArgumentListFilter.compileAndLinkCallback),
            r'^-f.+$' : (0, ArgumentListFilter.compileUnaryCallback),
            r'^-std=.+$' : (0, ArgumentListFilter.compileUnaryCallback),
        }

        #iam: keep the original command line around; abortUnaryCallback reports
        # it (previously this attribute was never assigned, so that callback
        # raised AttributeError instead of printing its message).
        self.inputList = inputList
        #iam: try and keep track of the files, input object, and output
        self.inputFiles = []
        self.objectFiles = []
        self.outputFilename = None
        #iam: try and split the args into linker and compiler switches
        self.compileArgs = []
        self.linkArgs = []
        # Mode flags discovered while scanning the command line.
        self.isVerbose = False
        self.isDependencyOnly = False
        self.isPreprocessOnly = False
        self.isAssembleOnly = False
        self.isAssembly = False
        self.isCompileOnly = False

        argExactMatches = dict(defaultArgExactMatches)
        argExactMatches.update(exactMatches)
        argPatterns = dict(defaultArgPatterns)
        argPatterns.update(patternMatches)

        self._inputArgs = collections.deque(inputList)

        #iam: parse the cmd line, bailing if we discover that there will be no second phase.
        while (len(self._inputArgs) > 0 and
               not (self.isAssembly or
                    self.isAssembleOnly or
                    self.isPreprocessOnly)):
            # Get the next argument
            currentItem = self._inputArgs.popleft()
            _logger.debug('Trying to match item %s', currentItem)
            # First, see if this exact flag has a handler in the table.
            # This is a cheap test. Otherwise, see if the input matches
            # some pattern with a handler that we recognize
            if currentItem in argExactMatches:
                (arity, handler) = argExactMatches[currentItem]
                flagArgs = self._shiftArgs(arity)
                handler(self, currentItem, *flagArgs)
            else:
                matched = False
                for pattern, (arity, handler) in argPatterns.items():
                    if re.match(pattern, currentItem):
                        flagArgs = self._shiftArgs(arity)
                        handler(self, currentItem, *flagArgs)
                        matched = True
                        break
                # If no action has been specified, this is a zero-argument
                # flag that we should just keep (treated as a compile flag).
                if not matched:
                    _logger.warning('Did not recognize the compiler flag "{0}"'.format(currentItem))
                    self.compileUnaryCallback(currentItem)

        if DUMPING:
            self.dump()

    def _shiftArgs(self, nargs):
        """Pop and return the next nargs arguments from the input deque."""
        ret = []
        while nargs > 0:
            ret.append(self._inputArgs.popleft())
            nargs = nargs - 1
        return ret

    def abortUnaryCallback(self, flag):
        """A flag we cannot safely pass through; report the command line and die."""
        _logger.warning('Out of context experience: "{0}"'.format(str(self.inputList)))
        sys.exit(1)

    def inputFileCallback(self, infile):
        """Record a source input file, noting assembly sources (.s/.S)."""
        _logger.debug('Input file: %s', infile)
        self.inputFiles.append(infile)
        if re.search(r'\.(s|S)$', infile):
            self.isAssembly = True

    def outputFileCallback(self, flag, filename):
        self.outputFilename = filename

    def objectFileCallback(self, objfile):
        self.objectFiles.append(objfile)

    def preprocessOnlyCallback(self, flag):
        self.isPreprocessOnly = True

    def dependencyOnlyCallback(self, flag):
        self.isDependencyOnly = True
        self.compileArgs.append(flag)

    def assembleOnlyCallback(self, flag):
        self.isAssembleOnly = True

    def verboseFlagCallback(self, flag):
        self.isVerbose = True

    def compileOnlyCallback(self, flag):
        self.isCompileOnly = True

    def linkUnaryCallback(self, flag):
        self.linkArgs.append(flag)

    def compileUnaryCallback(self, flag):
        self.compileArgs.append(flag)

    def compileAndLinkCallback(self, flag):
        # Flags such as -fsanitize=... must be seen by both phases.
        self.compileArgs.append(flag)
        self.linkArgs.append(flag)

    def darwinWarningLinkUnaryCallback(self, flag):
        """Refuse link flags that would strip the bitcode section on Darwin."""
        if sys.platform.startswith('darwin'):
            _logger.warning('The flag "{0}" cannot be used with this tool'.format(flag))
            sys.exit(1)
        else:
            self.linkArgs.append(flag)

    def defaultBinaryCallback(self, flag, arg):
        _logger.warning('Ignoring compiler arg pair: "{0} {1}"'.format(flag, arg))

    def dependencyBinaryCallback(self, flag, arg):
        self.isDependencyOnly = True
        self.compileArgs.append(flag)
        self.compileArgs.append(arg)

    def compileBinaryCallback(self, flag, arg):
        self.compileArgs.append(flag)
        self.compileArgs.append(arg)

    def linkBinaryCallback(self, flag, arg):
        self.linkArgs.append(flag)
        self.linkArgs.append(arg)

    def getOutputFilename(self):
        """Best guess at the output name the native compiler would produce."""
        if self.outputFilename is not None:
            return self.outputFilename
        elif self.isCompileOnly:
            #iam: -c but no -o, therefore the obj should end up in the cwd.
            (path, base) = os.path.split(self.inputFiles[0])
            (root, ext) = os.path.splitext(base)
            return '{0}.o'.format(root)
        else:
            return 'a.out'

    # iam: returns a pair [objectFilename, bitcodeFilename] i.e. .o and .bc.
    # the hidden flag determines whether the objectFile is hidden like the
    # bitcodeFile is (starts with a '.'); use the logging level & DUMPING flag
    # to get a sense of what is being written out.
    def getArtifactNames(self, srcFile, hidden=False):
        (srcpath, srcbase) = os.path.split(srcFile)
        (srcroot, srcext) = os.path.splitext(srcbase)
        if hidden:
            objbase = '.{0}.o'.format(srcroot)
        else:
            objbase = '{0}.o'.format(srcroot)
        bcbase = '.{0}.o.bc'.format(srcroot)
        path = ''
        if self.outputFilename is not None:
            path = os.path.dirname(self.outputFilename)
        return [os.path.join(path, objbase), os.path.join(path, bcbase)]

    #iam: for printing our partitioning of the args
    def dump(self):
        _logger.debug('compileArgs: {0}'.format(self.compileArgs))
        _logger.debug('inputFiles: {0}'.format(self.inputFiles))
        _logger.debug('linkArgs: {0}'.format(self.linkArgs))
        _logger.debug('objectFiles: {0}'.format(self.objectFiles))
        _logger.debug('outputFilename: {0}'.format(self.outputFilename))
        for srcFile in self.inputFiles:
            _logger.debug('srcFile: {0}'.format(srcFile))
            (objFile, bcFile) = self.getArtifactNames(srcFile)
            _logger.debug('{0} ===> ({1}, {2})'.format(srcFile, objFile, bcFile))
# Same as above, but change the name of the output filename when
# building the bitcode file so that we don't clobber the object file.
class ClangBitcodeArgumentListFilter(ArgumentListFilter):
    """Argument filter used for the bitcode compilation pass.

    Overrides the '-o' handler so that the output filename recorded while
    building bitcode can differ from the one used for the object file.
    """

    def __init__(self, arglist):
        overrides = {
            '-o': (1, ClangBitcodeArgumentListFilter.outputFileCallback),
        }
        super(ClangBitcodeArgumentListFilter, self).__init__(
            arglist, exactMatches=overrides)

    def outputFileCallback(self, flag, filename):
        # Record the output name requested for this (bitcode) invocation.
        self.outputFilename = filename
# Static class that allows the type of a file to be checked.
class FileType(object):
    """Static class that classifies a file via the 'file' utility."""

    # Provides int -> str map, populated by init().
    revMap = {}

    # Ordered classification rules: the first entry whose substrings all
    # appear in the 'file' output wins (order matters, e.g. 'shared'
    # would also match some executables' descriptions otherwise).
    _CHECKS = (
        (('ELF', 'executable'), 'ELF_EXECUTABLE'),
        (('Mach-O', 'executable'), 'MACH_EXECUTABLE'),
        (('ELF', 'shared'), 'ELF_SHARED'),
        (('Mach-O', 'dynamically linked shared'), 'MACH_SHARED'),
        (('current ar archive',), 'ARCHIVE'),
        (('ELF', 'relocatable'), 'ELF_OBJECT'),
        (('Mach-O', 'object'), 'MACH_OBJECT'),
    )

    @classmethod
    def getFileType(cls, fileName):
        # This is a hacky way of determining the type of file we are
        # looking at; maybe we should use python-magic instead?
        fileP = Popen(['file', os.path.realpath(fileName)], stdout=PIPE)
        output = fileP.communicate()[0]
        output = output.decode()
        for needles, typeName in cls._CHECKS:
            if all(needle in output for needle in needles):
                return getattr(cls, typeName)
        return cls.UNKNOWN

    @classmethod
    def init(cls):
        """Assign the integer constants and build the reverse map."""
        names = ('UNKNOWN',
                 'ELF_EXECUTABLE',
                 'ELF_OBJECT',
                 'ELF_SHARED',
                 'MACH_EXECUTABLE',
                 'MACH_OBJECT',
                 'MACH_SHARED',
                 'ARCHIVE')
        for index, name in enumerate(names):
            setattr(cls, name, index)
            cls.revMap[index] = name
# Initialise FileType at import time: populates the integer constants
# (FileType.ELF_EXECUTABLE, ...) and the reverse int -> name map.
FileType.init()
def attachBitcodePathToObject(bcPath, outFileName):
    """Embed the absolute path of bcPath into the object file outFileName.

    The path is written into a dedicated section (ELF: via objcopy;
    Darwin: via 'ld -r -sectcreate').  Silently returns for files that do
    not look like object files, and exits 0 if the object vanished
    underneath us (configure scripts delete things aggressively).
    """
    # Don't try to attach a bitcode path to a binary. Unfortunately
    # that won't work.
    (root, ext) = os.path.splitext(outFileName)
    _logger.debug('attachBitcodePathToObject: {0} ===> {1} [ext = {2}]\n'.format(bcPath, outFileName, ext))
    #iam: this also looks very dodgey; we need a more reliable way to do this:
    if ext not in ('.o', '.lo', '.os', '.So', '.po'):
        _logger.warning('Cannot attach bitcode path to "{0} of type {1}"'.format(outFileName, FileType.getFileType(outFileName)))
        return
    # Now just build a temporary text file with the full path to the
    # bitcode file that we'll write into the object file.
    # delete=False: the file must survive f.close() so the external tool
    # can read it; we remove it ourselves below.
    f = tempfile.NamedTemporaryFile(mode='w+b', delete=False)
    absBcPath = os.path.abspath(bcPath)
    f.write(absBcPath.encode())
    f.write('\n'.encode())
    _logger.debug(pprint.pformat('Wrote "{0}" to file "{1}"'.format(absBcPath, f.name)))
    # Ensure buffers are flushed so that objcopy doesn't read an empty
    # file
    f.flush()
    os.fsync(f.fileno())
    f.close()
    # Now write our bitcode section
    if (sys.platform.startswith('darwin')):
        # -keep_private_externs stops 'ld -r' from demoting global symbols
        # to locals (see the note in the flag table above).
        objcopyCmd = ['ld', '-r', '-keep_private_externs', outFileName, '-sectcreate', darwinSegmentName, darwinSectionName, f.name, '-o', outFileName]
    else:
        objcopyCmd = ['objcopy', '--add-section', '{0}={1}'.format(elfSectionName, f.name), outFileName]
    orc = 0
    try:
        # Skip empty objects: there is nothing to attach a section to.
        if os.path.getsize(outFileName) > 0:
            objProc = Popen(objcopyCmd)
            orc = objProc.wait()
    except OSError:
        # configure loves to immediately delete things, causing issues for
        # us here. Just ignore it
        os.remove(f.name)
        sys.exit(0)
    os.remove(f.name)
    if orc != 0:
        _logger.error('objcopy failed with {0}'.format(orc))
        sys.exit(-1)
class BuilderBase(object):
    """Common machinery shared by the concrete compiler builders."""

    def __init__(self, cmd, isCxx, prefixPath=None):
        self.cmd = cmd
        self.isCxx = isCxx
        # Used as prefix path for the compiler executables.
        if not prefixPath:
            self.prefixPath = ''
            return
        self.prefixPath = prefixPath
        # Normalise: the prefix always ends with the path separator.
        if not self.prefixPath.endswith(os.path.sep):
            self.prefixPath += os.path.sep
        # Fail early if the prefix does not point anywhere real.
        if not os.path.exists(self.prefixPath):
            errorMsg = 'Path to compiler "{0}" does not exist'.format(self.prefixPath)
            _logger.error(errorMsg)
            raise Exception(errorMsg)

    def getBitcodeFileName(self, argFilter):
        # clang and dragonegg share the same taste in bitcode filenames:
        # a hidden '.<output>.bc' next to the output file.
        dirPart, basePart = os.path.split(argFilter.getOutputFilename())
        return os.path.join(dirPart, '.{0}.bc'.format(basePart))
class ClangBuilder(BuilderBase):
    """Builder that drives clang/clang++ and clang's native bitcode output."""

    def __init__(self, cmd, isCxx, prefixPath=None):
        super(ClangBuilder, self).__init__(cmd, isCxx, prefixPath)

    def getBitcodeCompiler(self):
        # The bitcode compiler is just the ordinary compiler plus -emit-llvm.
        return self.getCompiler() + ['-emit-llvm']

    def getCompiler(self):
        tool = 'clang++' if self.isCxx else 'clang'
        return ['{0}{1}'.format(self.prefixPath, tool)]

    def getBitcodeArglistFilter(self):
        return ClangBitcodeArgumentListFilter(self.cmd)

    def extraBitcodeArgs(self, argFilter):
        # clang needs an explicit -o aimed at the hidden bitcode file.
        return ['-o', self.getBitcodeFileName(argFilter)]

    def attachBitcode(self, argFilter):
        attachBitcodePathToObject(self.getBitcodeFileName(argFilter),
                                  argFilter.getOutputFilename())
#iam: this should join the dodo soon, yes?
class DragoneggBuilder(BuilderBase):
    """Builder that drives gcc/g++ with the LLVM dragonegg plugin."""

    def __init__(self, cmd, isCxx, prefixPath=None):
        super(DragoneggBuilder, self).__init__(cmd, isCxx, prefixPath)

    def getBitcodeCompiler(self):
        plugin = os.getenv('LLVM_DRAGONEGG_PLUGIN')
        # We use '-B' to tell gcc where to look for an assembler: when we
        # build LLVM bitcode we do not want the GNU assembler, we want our
        # own assembler stub (see driver/as).
        cmd = self.getCompiler() + ['-B', driverDir,
                                    '-fplugin={0}'.format(plugin),
                                    '-fplugin-arg-dragonegg-emit-ir']
        _logger.debug(cmd)
        return cmd

    def getCompiler(self):
        pfx = ''
        prefixEnv = os.getenv('LLVM_GCC_PREFIX')
        if prefixEnv is not None:
            pfx = prefixEnv
        tool = 'g++' if self.isCxx else 'gcc'
        return ['{0}{1}{2}'.format(self.prefixPath, pfx, tool)]

    def getBitcodeArglistFilter(self):
        return ArgumentListFilter(self.cmd)

    def attachBitcode(self, argFilter):
        # Nothing to do: the -B assembler stub already handled the bitcode.
        pass

    def extraBitcodeArgs(self, argFilter):
        return []
def getBuilder(cmd, isCxx):
    """Select the concrete builder from the LLVM_COMPILER environment variable.

    Raises Exception when the variable is unset or names an unknown
    compiler type ('clang' and 'dragonegg' are the supported values).
    """
    compilerEnv = 'LLVM_COMPILER'
    cstring = os.getenv(compilerEnv)
    # Fixed: was 'cstring == None' (PEP 8 E711), and the check ran only
    # after logging 'WLLVM compiler using None'. Validate first.
    if cstring is None:
        errorMsg = ' No compiler set. Please set environment variable ' + compilerEnv
        _logger.critical(errorMsg)
        raise Exception(errorMsg)
    pathPrefix = os.getenv(llvmCompilerPathEnv)  # Optional
    _logger.info('WLLVM compiler using {0}'.format(cstring))
    if pathPrefix:
        _logger.info('WLLVM compiler path prefix "{0}"'.format(pathPrefix))
    if cstring == 'clang':
        return ClangBuilder(cmd, isCxx, pathPrefix)
    if cstring == 'dragonegg':
        return DragoneggBuilder(cmd, isCxx, pathPrefix)
    errorMsg = compilerEnv + '=' + str(cstring) + ' : Invalid compiler type'
    _logger.critical(errorMsg)
    raise Exception(errorMsg)
def buildObject(builder):
    """Run the native compiler over the original command line.

    Exits the process with the compiler's return code on failure.
    """
    nativeCmd = builder.getCompiler() + builder.cmd
    rc = Popen(nativeCmd).wait()
    if rc != 0:
        sys.exit(rc)
# This command does not have the executable with it
def buildAndAttachBitcode(builder):
    """Produce bitcode for each input file and embed its path in the object.

    Bails out early (no second phase) for runs with no input files,
    assembly sources, -S, -E, or dependency generation without -c.
    Always terminates the process with exit code 0 when it does work.
    """
    af = builder.getBitcodeArglistFilter()
    if ( len(af.inputFiles) == 0 or
         af.isAssembly or
         af.isAssembleOnly or
         (af.isDependencyOnly and not af.isCompileOnly) or
         af.isPreprocessOnly ):
        _logger.debug('No work to do')
        _logger.debug(af.__dict__)
        return
    #iam: when we have multiple input files we'll have to keep track of their object files.
    newObjectFiles = []
    # When we are also linking, the per-source object files are hidden
    # (dot-prefixed) artifacts that we regenerate ourselves.
    hidden = not af.isCompileOnly
    if len(af.inputFiles) == 1 and af.isCompileOnly:
        # iam:
        # we could have
        # "... -c -o foo.o" or even "... -c -o foo.So" which is OK, but we could also have
        # "... -c -o crazy-assed.objectfile" which we wouldn't get right (yet)
        # so we need to be careful with the objFile and bcFile
        # maybe python-magic is in our future ...
        srcFile = af.inputFiles[0]
        (objFile, bcFile) = af.getArtifactNames(srcFile, hidden)
        if af.outputFilename is not None:
            # An explicit -o wins over the derived artifact names.
            objFile = af.outputFilename
            bcFile = builder.getBitcodeFileName(af)
        buildBitcodeFile(builder, srcFile, bcFile)
        attachBitcodePathToObject(bcFile, objFile)
    else:
        for srcFile in af.inputFiles:
            (objFile, bcFile) = af.getArtifactNames(srcFile, hidden)
            if hidden:
                # Regenerate the object file so it exists for the link step.
                buildObjectFile(builder, srcFile, objFile)
                newObjectFiles.append(objFile)
            buildBitcodeFile(builder, srcFile, bcFile)
            attachBitcodePathToObject(bcFile, objFile)
        if not af.isCompileOnly:
            linkFiles(builder, newObjectFiles)
    sys.exit(0)
def linkFiles(builder, objectFiles):
    """Link the given objects (plus any from the command line) into the target.

    Exits the process with the linker's return code on failure.
    """
    af = builder.getBitcodeArglistFilter()
    outputFile = af.getOutputFilename()
    linkCmd = (builder.getCompiler()
               + list(objectFiles)
               + af.objectFiles
               + af.linkArgs
               + ['-o', outputFile])
    rc = Popen(linkCmd).wait()
    if rc != 0:
        _logger.warning('Failed to link "{0}"'.format(str(linkCmd)))
        sys.exit(rc)
def buildBitcodeFile(builder, srcFile, bcFile):
    """Compile srcFile into LLVM bitcode at bcFile; exit on failure."""
    af = builder.getBitcodeArglistFilter()
    bcCmd = (builder.getBitcodeCompiler()
             + af.compileArgs
             + ['-c', srcFile, '-o', bcFile])
    _logger.debug('buildBitcodeFile: {0}\n'.format(bcCmd))
    rc = Popen(bcCmd).wait()
    if rc != 0:
        _logger.warning('Failed to generate bitcode "{0}" for "{1}"'.format(bcFile, srcFile))
        sys.exit(rc)
def buildObjectFile(builder, srcFile, objFile):
    """Compile srcFile into a native object at objFile; exit on failure."""
    af = builder.getBitcodeArglistFilter()
    objCmd = (builder.getCompiler()
              + af.compileArgs
              + [srcFile, '-c', '-o', objFile])
    _logger.debug('buildObjectFile: {0}\n'.format(objCmd))
    rc = Popen(objCmd).wait()
    if rc != 0:
        _logger.warning('Failed to generate object "{0}" for "{1}"'.format(objFile, srcFile))
        sys.exit(rc)
# bd & iam:
#
# case 1 (compileOnly):
#
# if the -c flag exists then so do all the .o files, and we need to
# locate them and then produce and embed the bitcode.
#
# locating them is easy:
# either the .o is in the cmdline and we are in the simple case,
# or else it was generated according to getObjectFilename
#
# we then produce and attach bitcode for each inputFile in the cmdline
#
#
# case 2 (compile and link)
#
# af.inputFiles is not empty, and compileOnly is false.
# in this case the .o's may not exist, we must regenerate
# them in any case.
#
#
# case 3 (link only)
#
# in this case af.inputFiles is empty and we are done
#
#
| |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Create all 'seak' app tables (South schema migration, forward pass).

    NOTE(review): this is auto-generated South migration code; the field
    specs must stay byte-for-byte in sync with the frozen ORM recorded in
    this migration, so do not hand-edit them.
    """
    # Adding model 'ConservationFeature'
    db.create_table('seak_conservationfeature', (
        ('name', self.gf('django.db.models.fields.CharField')(max_length=99)),
        ('level1', self.gf('django.db.models.fields.CharField')(max_length=99)),
        ('level2', self.gf('django.db.models.fields.CharField')(max_length=99, null=True, blank=True)),
        ('level3', self.gf('django.db.models.fields.CharField')(max_length=99, null=True, blank=True)),
        ('level4', self.gf('django.db.models.fields.CharField')(max_length=99, null=True, blank=True)),
        ('level5', self.gf('django.db.models.fields.CharField')(max_length=99, null=True, blank=True)),
        ('dbf_fieldname', self.gf('django.db.models.fields.CharField')(max_length=15, null=True, blank=True)),
        ('units', self.gf('django.db.models.fields.CharField')(max_length=90, null=True, blank=True)),
        ('uid', self.gf('django.db.models.fields.IntegerField')(primary_key=True)),
    ))
    db.send_create_signal('seak', ['ConservationFeature'])

    # Adding model 'Cost'
    db.create_table('seak_cost', (
        ('name', self.gf('django.db.models.fields.CharField')(max_length=99)),
        ('uid', self.gf('django.db.models.fields.IntegerField')(primary_key=True)),
        ('dbf_fieldname', self.gf('django.db.models.fields.CharField')(max_length=15, null=True, blank=True)),
        ('units', self.gf('django.db.models.fields.CharField')(max_length=16, null=True, blank=True)),
        ('desc', self.gf('django.db.models.fields.TextField')()),
    ))
    db.send_create_signal('seak', ['Cost'])

    # Adding model 'PlanningUnit'
    db.create_table('seak_planningunit', (
        ('fid', self.gf('django.db.models.fields.IntegerField')(primary_key=True)),
        ('name', self.gf('django.db.models.fields.CharField')(max_length=99)),
        ('geometry', self.gf('django.contrib.gis.db.models.fields.MultiPolygonField')(srid=3857, null=True, blank=True)),
        ('date_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
    ))
    db.send_create_signal('seak', ['PlanningUnit'])

    # Adding model 'PuVsCf' (planning unit <-> conservation feature amounts)
    db.create_table('seak_puvscf', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('pu', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['seak.PlanningUnit'])),
        ('cf', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['seak.ConservationFeature'])),
        ('amount', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
    ))
    db.send_create_signal('seak', ['PuVsCf'])

    # Adding unique constraint on 'PuVsCf', fields ['pu', 'cf']
    db.create_unique('seak_puvscf', ['pu_id', 'cf_id'])

    # Adding model 'PuVsCost' (planning unit <-> cost amounts)
    db.create_table('seak_puvscost', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('pu', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['seak.PlanningUnit'])),
        ('cost', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['seak.Cost'])),
        ('amount', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
    ))
    db.send_create_signal('seak', ['PuVsCost'])

    # Adding unique constraint on 'PuVsCost', fields ['pu', 'cost']
    db.create_unique('seak_puvscost', ['pu_id', 'cost_id'])

    # Adding model 'Scenario'
    db.create_table('seak_scenario', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='seak_scenario_related', to=orm['auth.User'])),
        # NOTE(review): max_length is recorded as the string '255' here —
        # it matches the frozen ORM, so leave as-is in this migration.
        ('name', self.gf('django.db.models.fields.CharField')(max_length='255')),
        ('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ('date_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='seak_scenario_related', null=True, to=orm['contenttypes.ContentType'])),
        ('object_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
        ('input_targets', self.gf('seak.models.JSONField')()),
        ('input_penalties', self.gf('seak.models.JSONField')()),
        ('input_relativecosts', self.gf('seak.models.JSONField')()),
        ('input_geography', self.gf('seak.models.JSONField')()),
        ('input_scalefactor', self.gf('django.db.models.fields.FloatField')(default=0.0)),
        ('description', self.gf('django.db.models.fields.TextField')(default='', null=True, blank=True)),
        ('output_best', self.gf('seak.models.JSONField')(null=True, blank=True)),
        ('output_pu_count', self.gf('seak.models.JSONField')(null=True, blank=True)),
    ))
    db.send_create_signal('seak', ['Scenario'])

    # Adding M2M table for field sharing_groups on 'Scenario'
    db.create_table('seak_scenario_sharing_groups', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('scenario', models.ForeignKey(orm['seak.scenario'], null=False)),
        ('group', models.ForeignKey(orm['auth.group'], null=False))
    ))
    db.create_unique('seak_scenario_sharing_groups', ['scenario_id', 'group_id'])

    # Adding model 'Folder'
    db.create_table('seak_folder', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='seak_folder_related', to=orm['auth.User'])),
        # NOTE(review): string max_length again — matches the frozen ORM.
        ('name', self.gf('django.db.models.fields.CharField')(max_length='255')),
        ('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ('date_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='seak_folder_related', null=True, to=orm['contenttypes.ContentType'])),
        ('object_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
        ('description', self.gf('django.db.models.fields.TextField')(default='', null=True, blank=True)),
    ))
    db.send_create_signal('seak', ['Folder'])

    # Adding M2M table for field sharing_groups on 'Folder'
    db.create_table('seak_folder_sharing_groups', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('folder', models.ForeignKey(orm['seak.folder'], null=False)),
        ('group', models.ForeignKey(orm['auth.group'], null=False))
    ))
    db.create_unique('seak_folder_sharing_groups', ['folder_id', 'group_id'])

    # Adding model 'PlanningUnitShapes'
    db.create_table('seak_planningunitshapes', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('pu', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['seak.PlanningUnit'])),
        ('stamp', self.gf('django.db.models.fields.FloatField')()),
        ('bests', self.gf('django.db.models.fields.IntegerField')(default=0)),
        ('hits', self.gf('django.db.models.fields.IntegerField')(default=0)),
        ('fid', self.gf('django.db.models.fields.IntegerField')(null=True)),
        ('name', self.gf('django.db.models.fields.CharField')(max_length=99, null=True)),
        ('geometry', self.gf('django.contrib.gis.db.models.fields.MultiPolygonField')(srid=3857, null=True, blank=True)),
    ))
    db.send_create_signal('seak', ['PlanningUnitShapes'])
    def backwards(self, orm):
        """Reverse this migration: drop every table/constraint forwards() created.

        Auto-generated by South. Unique constraints are removed before the
        tables that carry them; M2M join tables are dropped alongside their
        owning models. ``orm`` is South's frozen-ORM accessor (see ``models``
        below) and is unused here because only raw DDL is issued.
        """
        # Removing unique constraint on 'PuVsCost', fields ['pu', 'cost']
        db.delete_unique('seak_puvscost', ['pu_id', 'cost_id'])
        # Removing unique constraint on 'PuVsCf', fields ['pu', 'cf']
        db.delete_unique('seak_puvscf', ['pu_id', 'cf_id'])
        # Deleting model 'ConservationFeature'
        db.delete_table('seak_conservationfeature')
        # Deleting model 'Cost'
        db.delete_table('seak_cost')
        # Deleting model 'PlanningUnit'
        db.delete_table('seak_planningunit')
        # Deleting model 'PuVsCf'
        db.delete_table('seak_puvscf')
        # Deleting model 'PuVsCost'
        db.delete_table('seak_puvscost')
        # Deleting model 'Scenario'
        db.delete_table('seak_scenario')
        # Removing M2M table for field sharing_groups on 'Scenario'
        db.delete_table('seak_scenario_sharing_groups')
        # Deleting model 'Folder'
        db.delete_table('seak_folder')
        # Removing M2M table for field sharing_groups on 'Folder'
        db.delete_table('seak_folder_sharing_groups')
        # Deleting model 'PlanningUnitShapes'
        db.delete_table('seak_planningunitshapes')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 7, 18, 8, 47, 45, 101970)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 7, 18, 8, 47, 45, 101877)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'seak.conservationfeature': {
'Meta': {'object_name': 'ConservationFeature'},
'dbf_fieldname': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'level1': ('django.db.models.fields.CharField', [], {'max_length': '99'}),
'level2': ('django.db.models.fields.CharField', [], {'max_length': '99', 'null': 'True', 'blank': 'True'}),
'level3': ('django.db.models.fields.CharField', [], {'max_length': '99', 'null': 'True', 'blank': 'True'}),
'level4': ('django.db.models.fields.CharField', [], {'max_length': '99', 'null': 'True', 'blank': 'True'}),
'level5': ('django.db.models.fields.CharField', [], {'max_length': '99', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '99'}),
'uid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'units': ('django.db.models.fields.CharField', [], {'max_length': '90', 'null': 'True', 'blank': 'True'})
},
'seak.cost': {
'Meta': {'object_name': 'Cost'},
'dbf_fieldname': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'desc': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '99'}),
'uid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'units': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
},
'seak.folder': {
'Meta': {'object_name': 'Folder'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'seak_folder_related'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'sharing_groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'seak_folder_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.Group']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'seak_folder_related'", 'to': "orm['auth.User']"})
},
'seak.planningunit': {
'Meta': {'object_name': 'PlanningUnit'},
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'fid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '99'})
},
'seak.planningunitshapes': {
'Meta': {'object_name': 'PlanningUnitShapes'},
'bests': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'fid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857', 'null': 'True', 'blank': 'True'}),
'hits': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '99', 'null': 'True'}),
'pu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['seak.PlanningUnit']"}),
'stamp': ('django.db.models.fields.FloatField', [], {})
},
'seak.puvscf': {
'Meta': {'unique_together': "(('pu', 'cf'),)", 'object_name': 'PuVsCf'},
'amount': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cf': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['seak.ConservationFeature']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['seak.PlanningUnit']"})
},
'seak.puvscost': {
'Meta': {'unique_together': "(('pu', 'cost'),)", 'object_name': 'PuVsCost'},
'amount': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cost': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['seak.Cost']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['seak.PlanningUnit']"})
},
'seak.scenario': {
'Meta': {'object_name': 'Scenario'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'seak_scenario_related'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input_geography': ('seak.models.JSONField', [], {}),
'input_penalties': ('seak.models.JSONField', [], {}),
'input_relativecosts': ('seak.models.JSONField', [], {}),
'input_scalefactor': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'input_targets': ('seak.models.JSONField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'output_best': ('seak.models.JSONField', [], {'null': 'True', 'blank': 'True'}),
'output_pu_count': ('seak.models.JSONField', [], {'null': 'True', 'blank': 'True'}),
'sharing_groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'seak_scenario_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.Group']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'seak_scenario_related'", 'to': "orm['auth.User']"})
}
}
complete_apps = ['seak']
| |
import os
import re
import json
import inspect
from collections import defaultdict
import yaml
from fabric.api import local, settings, hide
from libcloud.compute.types import Provider, OLD_CONSTANT_TO_NEW_MAPPING
from libcloud.compute.providers import get_driver as get_libcloud_driver
from .utils import render_template, get_templates_vars, render_json_to_template
# Directory containing this module; config files are resolved relative to it.
BASE_PATH = os.path.dirname(os.path.realpath(__file__))

# Module-wide configuration, loaded once at import time.
# BUGFIX: yaml.load() without an explicit Loader is deprecated and can
# construct arbitrary Python objects from the YAML stream; safe_load()
# restricts parsing to plain data types (config.yaml only holds plain data).
with open(os.path.join(BASE_PATH, 'config.yaml'), 'r') as conf_file:
    CONFIG = yaml.safe_load(conf_file)
class Service:
    """A single systemd unit belonging to a compute node.

    The unit name is derived from the configured prefix, the owning node's
    name, and the service type: ``<prefix>-<node_name>-<type>.service``.
    """

    def __init__(self, node_name, service_type):
        self.config = CONFIG['services']
        self.type = service_type
        self.name = '{prefix}-{node_name}-{type}.service'.format(
            prefix=self.config['prefix'],
            node_name=node_name,
            type=service_type,
        )

    @property
    def exist(self):
        """True if systemd reports a unit file matching this service's name."""
        with settings(warn_only=True), hide('commands'):
            unit_listing = local('systemctl list-unit-files', capture=True)
        return any(line.startswith(self.name) for line in unit_listing.split('\n'))

    def create(self, template_context):
        """Render the unit file into the systemd directory, then enable and
        start the service."""
        render_template(
            template=self.config['templates'][self.type],
            to_file=os.path.join(self.config['systemd_dir'], self.name),
            context=template_context,
        )
        # Enable and run compute node service
        self.command('enable')
        self.command('start')

    def delete(self):
        """Disable and stop the service, then remove its unit file if present."""
        self.command('disable')
        self.command('stop')
        unit_path = os.path.join(CONFIG['services']['systemd_dir'], self.name)
        if os.path.exists(unit_path):
            os.remove(unit_path)

    def command(self, cmd):
        """Run ``systemctl <cmd>`` against this unit.

        Raises BadServiceCommand when ``cmd`` is not whitelisted in the
        configuration; returns fabric's result object otherwise.
        """
        valid_commands = CONFIG['services']['commands']
        if cmd not in valid_commands:
            raise BadServiceCommand('Command "{}" not supported. Valid commands: {}'.format(cmd, valid_commands))
        with settings(warn_only=True):
            return local('systemctl {cmd} {nova_service}'.format(cmd=cmd, nova_service=self.name), capture=False)
class Node:
    """A compute node: a named set of systemd services plus their rendered
    config files, as described under CONFIG['nodes']."""

    def __init__(self, name, node_type):
        self.name = name
        self.type = node_type
        self.config = CONFIG['nodes']
        # Map of service name -> Service (when its unit exists) or None.
        self.services = {service_name: self._get_service(service_name) for service_name in self.config['services']}

    def create(self, template_context):
        """Render a config file and create a systemd service for every
        configured service slot that does not already exist.

        Mutates ``template_context`` by injecting 'config_file' before
        rendering; the config filename is derived from the node (not the
        service): <prefix>-<node name>-<node type>.conf.
        """
        for service_name, service in self.services.items():
            if not service:
                service_conf = self.config['services'][service_name]
                template_context['config_file'] = os.path.join(
                    service_conf['dir_dest'], '{prefix}-{name}-{type}.conf'.format(
                        prefix=self.config['prefix'],
                        name=self.name,
                        type=self.type
                    )
                )
                render_template(
                    template=service_conf['template'],
                    to_file=template_context['config_file'],
                    context=template_context
                )
                new_service = Service(node_name=self.name, service_type=service_name)
                new_service.create(template_context=template_context)
                self.services[service_name] = new_service

    def delete(self):
        """Stop and remove every existing service and delete config files."""
        # Delete all services
        for service_name, service in self.services.items():
            if service:
                service.delete()
            # Delete configs
            # NOTE(review): source indentation was ambiguous here; config
            # removal is taken to run for every service entry, even when the
            # unit itself was absent -- confirm against upstream history.
            service_conf = self.config['services'][service_name]
            config_file = os.path.join(
                service_conf['dir_dest'], '{prefix}-{name}-{type}.conf'.format(
                    prefix=self.config['prefix'],
                    name=self.name,
                    type=self.type
                )
            )
            if os.path.exists(config_file):
                os.remove(config_file)

    def command(self, cmd):
        """Run a systemctl command on every existing service.

        Returns the list of per-service results (skips absent services).
        """
        response = []
        for service_name, service in self.services.items():
            if service:
                response.append(service.command(cmd))
        return response

    def _get_service(self, service_type):
        # Probe systemd: return a Service handle only if its unit file exists.
        service = Service(node_name=self.name, service_type=service_type)
        return service if service.exist else None
class NodeManager:
    """Create, list, inspect and delete compute nodes defined by the
    provider catalogue (providers.json) and the service templates in CONFIG."""

    def __init__(self):
        self.valid_node_types = self._get_valid_node_types()

    def node_create(self, node_name, node_type, **kwargs):
        """Create a new node of ``node_type`` named ``node_name``.

        Raises NodeTypeNotFound for an unknown type, NodeAlreadyExist for a
        duplicate name, and AttributeError when required template parameters
        are missing from ``kwargs``.
        """
        # Check node type
        if node_type not in self.valid_node_types['providers']:
            # BUGFIX: report the actual provider names; the old message showed
            # the top-level keys ('providers', 'basic_tokens') instead.
            raise NodeTypeNotFound('Node type "{}" not found. Valid types: {}'.format(
                node_type, self.valid_node_types['providers'].keys()))
        # Check if node already exist
        enabled_nodes = self.node_list()
        if node_name in [node.name for node in enabled_nodes]:
            raise NodeAlreadyExist('Node "{}" already exist'.format(node_name))
        kwargs['hostname'] = node_name
        kwargs['node_type'] = node_type
        # Check if we got all necessary params in kwargs
        templates_vars = self.get_node_params(node_type)
        if not all(var in kwargs for var in templates_vars):
            raise AttributeError('Too few arguments to create "{}" node. Need to provide: {}'
                                 .format(node_type, templates_vars.keys()))
        kwargs['provider_config'] = render_json_to_template(
            provider=self.valid_node_types['providers'][node_type],
            token_values=kwargs
        )
        new_node = Node(name=node_name, node_type=node_type)
        new_node.create(template_context=kwargs)

    def node_delete(self, node_name):
        """Delete the named node; raises NodeNotFound when it does not exist."""
        node = self.node_get(node_name)
        node.delete()

    def node_get(self, node_name):
        """Return the Node with the given name.

        BUGFIX: previously fell through and returned None for an unknown
        name, which made node_delete() fail with AttributeError; now raises
        the (previously unused) NodeNotFound exception instead.
        """
        for node in self.node_list():
            if node.name == node_name:
                return node
        raise NodeNotFound('Node "{}" not found'.format(node_name))

    @staticmethod
    def node_list():
        """Discover existing nodes from rendered nova config files on disk."""
        nodes = []
        nodes_conf = CONFIG['nodes']
        # Hoisted out of the loop: one compile instead of one per file.
        pattern = re.compile(r'{}-(?P<name>.+)-(?P<type>.+)\.conf'.format(nodes_conf['prefix']))
        for filename in os.listdir(nodes_conf['services']['nova']['dir_dest']):
            match = pattern.search(filename)
            if match:
                nodes.append(Node(
                    name=match.group('name'),
                    node_type=match.group('type')
                ))
        return nodes

    def get_node_params(self, node_type=None):
        """Return {param: description} for one node type, or a mapping of
        every type to its params when ``node_type`` is None/unknown."""
        # Collect every token referenced by the service templates.
        templates_vars = get_templates_vars(
            templates=[service['template'] for service_name, service in CONFIG['nodes']['services'].items()]
        )
        # hostname and provider_config are filled in automatically by
        # node_create(), so they are not user-supplied parameters.
        templates_vars.remove('hostname')
        templates_vars.remove('provider_config')
        all_node_params = {}
        basic_tokens = self.valid_node_types['basic_tokens']
        for n_type_name, n_type_params in self.valid_node_types['providers'].items():
            node_params = defaultdict(lambda: 'Description not provided')
            node_params.update(n_type_params.get('tokens', {}))
            for token in templates_vars:
                if token in basic_tokens:
                    node_params[token] = basic_tokens[token]
                elif token not in node_params:
                    # BUGFIX: only fill in the placeholder when the provider
                    # did not document the token itself; the old code
                    # unconditionally overwrote provider-specific descriptions.
                    node_params[token] = 'Description not provided'
            if n_type_name == node_type:
                return node_params
            all_node_params[n_type_name] = node_params
        return all_node_params

    @staticmethod
    def _get_libcloud_providers():
        """Build a provider map by introspecting libcloud driver constructors.

        Currently unused by default (see _get_valid_node_types); kept for the
        optional dynamic-catalogue path.
        """
        providers = {}
        for provider_name in [item for item in vars(Provider) if not item.startswith('_')]:
            if provider_name.lower() in OLD_CONSTANT_TO_NEW_MAPPING:
                continue
            try:
                provider_cls = get_libcloud_driver(getattr(Provider, provider_name))
            except Exception:
                # Some Provider constants have no importable driver; skip them.
                continue
            # BUGFIX: inspect.getargspec() was removed in Python 3.11;
            # getfullargspec() exposes the same .args attribute.
            provider_cls_info = inspect.getfullargspec(provider_cls)
            node_params = defaultdict(lambda: 'Description not provided')
            for arg in provider_cls_info.args:
                if arg not in ['cls', 'self']:
                    node_params[arg] = {
                        'description': {
                            'en': '',
                            'ru': ''
                        },
                        'type': 'str'
                    }
            providers[provider_name] = {
                'section_name': 'kozinaki_GCP',
                'tokens': node_params
            }
        return providers

    def _get_valid_node_types(self):
        """Load the static provider catalogue shipped next to this module."""
        with open(os.path.join(BASE_PATH, 'providers.json'), 'r') as f:
            providers_data = json.load(f)
        # libcloud_providers = self._get_libcloud_providers()
        # providers_data['providers'].update(libcloud_providers)
        return providers_data
# Compute node manager exceptions: a small hierarchy rooted at
# ComputeNodeManager so callers can catch all manager errors at once.
class ComputeNodeManager(Exception):
    """Base class for all compute-node-manager errors."""


class BadServiceCommand(ComputeNodeManager):
    """Raised when an unsupported systemctl command is requested."""


class NodeNotFound(ComputeNodeManager):
    """Raised when a named node cannot be located."""


class NodeAlreadyExist(ComputeNodeManager):
    """Raised when creating a node whose name is already in use."""


class NodeTypeNotFound(ComputeNodeManager):
    """Raised when an unknown node type is requested."""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.