# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from calvin.calvinsys import CalvinSys
from calvin.actorstore.store import ActorStore
from calvin.runtime.north.calvin_token import Token
from calvin.runtime.south.endpoint import Endpoint
def fwrite(port, value):
if isinstance(value, Token):
port.fifo.write(value)
else:
port.fifo.write(Token(value=value))
def pwrite(actor, portname, value):
port = actor.inports.get(portname)
if not port:
raise Exception("No such port %s" % (portname,))
if isinstance(value, list):
for v in value:
fwrite(port, v)
else:
fwrite(port, value)
def pread(actor, portname, number=1):
port = actor.outports.get(portname, None)
assert port
if number > 0:
if pavailable(actor, portname) < number:
raise AssertionError("Too few tokens available, %d, expected %d" % (pavailable(actor, portname), number))
else:
if pavailable(actor, portname) > number:
raise AssertionError("Too many tokens available, %d, expected %d" % (pavailable(actor, portname), number))
values = [port.fifo.read(actor.id).value for _ in range(number)]
port.fifo.commit_reads(actor.id, True)
return values
def pavailable(actor, portname):
port = actor.outports.get(portname, None)
assert port
return port.fifo.available_tokens(actor.id)
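# Illustrative sketch (not part of the original suite) of how these helpers
# are driven: each actor under test exposes a `test_set` list that
# ActorTester.test_actor() below consumes. Port names and values here are
# hypothetical.
#
#   test_set = [
#       {
#           'setup': [lambda actor: None],      # optional per-test setup
#           'in': {'token': [1, 2, 3]},         # written with pwrite()
#           'out': {'token': [1, 2, 3]},        # compared against pread()
#           'postcond': [lambda actor: True],   # checked after actor.fire()
#       },
#   ]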
class DummyInEndpoint(Endpoint):
"""
Dummy in endpoint for actor test
"""
def __init__(self, port):
super(DummyInEndpoint, self).__init__(port)
def is_connected(self):
return True
def read_token(self):
token = self.port.fifo.read(self.port.id)
if token:
self.port.fifo.commit_reads(self.port.id, True)
return token
def available_tokens(self):
tokens = 0
tokens += self.port.fifo.available_tokens(self.port.id)
return tokens
def peek_token(self):
return self.port.fifo.read(self.port.id)
def commit_peek_as_read(self):
self.port.fifo.commit_reads(self.port.id)
def peek_rewind(self):
self.port.fifo.rollback_reads(self.port.id)
class FDMock(object):
def __init__(self, fname, mode):
self.fp = open(fname, mode)
if 'r' in mode:
self.buffer = self.fp.read()
else:
self.buffer = ""
def close(self):
self.fp.close()
def eof(self):
return len(self.buffer) == 0
def has_data(self):
return len(self.buffer) > 0
def read(self):
data = self.buffer
self.buffer = ""
return data
def write(self, data):
self.buffer += data
self.fp.write(data)
def read_line(self):
if '\n' in self.buffer:
line, self.buffer = self.buffer.split("\n", 1)
else:
line = self.buffer
self.buffer = ""
return line
def write_line(self, data):
self.buffer += data + "\n"
self.fp.write(data + "\n")
class TimerMock(object):
def __init__(self):
self._triggered = False
@property
def triggered(self):
return self._triggered
def ack(self):
assert self._triggered
self._triggered = False
def cancel(self):
del self._triggered
def trigger(self):
assert not self._triggered
self._triggered = True
class CalvinSysTimerMock(object):
def repeat(self, delay):
return TimerMock()
def once(self, delay):
return TimerMock()
class CalvinSysFileMock(object):
def open(self, fname, mode):
return FDMock(fname, mode)
def close(self, fdmock):
fdmock.close()
class ActorTester(object):
def __init__(self):
self.store = ActorStore()
self.actors = {}
self.illegal_actors = {}
self.components = {}
def collect_actors(self, actor):
actors = [m + '.' + a for m in self.store.modules() for a in self.store.actors(m)]
if actor:
actors = [a for a in actors if actor in a]
self.actor_names = actors
def instantiate_actors(self):
for a in self.actor_names:
found, primitive, actorclass = self.store.lookup(a)
if found and primitive:
try:
actor = actorclass(a, disable_state_checks=True)
if not hasattr(actor, 'test_set'):
self.actors[a] = 'no_test'
continue
actor.attach_API("calvinsys", CalvinSys(None))
actor.calvinsys.io.file = CalvinSysFileMock()
actor.calvinsys.events.timer = CalvinSysTimerMock()
actor.init(*actorclass.test_args, **actorclass.test_kwargs)
actor.setup_complete()
except Exception as e:
self.illegal_actors[a] = "Failed to instantiate"
# print "Actor %s: %s" % (a, e)
raise e
for inport in actor.inports.values():
inport.endpoint = DummyInEndpoint(inport)
for outport in actor.outports.values():
outport.fifo.add_reader(actor.id)
self.actors[a] = actor
elif found and not primitive:
self.components[a] = "TODO: Cannot test components (%s)" % (a,)
else:
self.illegal_actors[a] = "Unknown actor - probably parsing issues"
def test_actor(self, actor, aut):
for idx in range(len(aut.test_set)):
test_index = idx + 1
test = aut.test_set[idx]
setups = test.get('setup', [])
inputs = test.get('in', {})
outputs = test.get('out', {})
postconds = test.get('postcond', [])
for f in setups:
try:
f(aut)
except Exception as e:
print "Actor %s failed during setup of test %d: %s" % (actor, test_index, e.message)
raise Exception("Failed during setup of test %d" % (test_index, ))
for port, values in inputs.iteritems():
pwrite(aut, port, values)
aut.fire()
for port, values in outputs.iteritems():
try:
vals = pread(aut, port, len(values))
assert vals == values
except AssertionError as e:
print str(e)
raise AssertionError("Failed test %d" % (test_index,))
if not all(f(aut) for f in postconds):
raise AssertionError("Failed post condition of test %d" % (test_index, ))
return True
def test_actors(self):
test_pass = []
test_fail = {}
no_test = []
for actor in self.actors:
aut = self.actors[actor]
if aut == "no_test":
no_test.append(actor)
continue
try:
self.test_actor(actor, aut)
test_pass.append(actor)
except AssertionError as e:
test_fail[actor] = e.message
# raise e
except Exception as e:
self.illegal_actors[actor] = str(e)
return {'pass': test_pass, 'fail': test_fail, 'skipped': no_test,
'errors': self.illegal_actors, 'components': self.components}
def show_result(header, result):
print header
for actor in result:
print " %s" % (actor, )
def show_issue(header, result):
print header
for actor, reason in result.iteritems():
print " %s: %s" % (actor, reason)
def show_issues(results):
if results['errors']:
show_issue("Actors with errors", results['errors'])
if results['fail']:
show_issue("Failed actor tests", results['fail'])
def show_results(results):
if results['pass']:
show_result("Passed tests", results['pass'])
if results['skipped']:
show_result("Skipped tests", results['skipped'])
if results['components']:
show_issue("Components", results['components'])
show_issues(results)
@pytest.mark.essential
def test_actors(actor="", show=False):
t = ActorTester()
t.collect_actors(actor)
t.instantiate_actors()
results = t.test_actors()
if not any(results.values()):
if actor:
print "No actors matching '%s' found" % (actor,)
else:
raise Exception("No actors found")
if show:
return results
if results['fail'] or results['errors']:
show_issues(results)
if results['errors']:
raise Exception("%d actor(s) had errors" % (len(results['errors']), ))
if results['fail']:
raise Exception("%d actor(s) failed tests" % (len(results['fail']),))
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
show_results(test_actors(actor=sys.argv[1], show=True))
else:
show_results(test_actors(show=True))
#!/usr/bin/env python
from __future__ import division
from __future__ import absolute_import
from past.utils import old_div
import numpy
from . import lib_directional_statistics as lib_direct
numpy.set_printoptions(precision=15)
def get_n_ptrm(tmin, tmax, ptrm_temps, ptrm_starting_temps):
"""
input: tmin, tmax, ptrm_temps, ptrm_starting_temps
returns number of ptrm_checks included in best fit segment.
excludes checks if temp exceeds tmax OR if starting temp exceeds tmax.
output: n_ptrm, ptrm_checks_included_temperatures
"""
# does not exclude ptrm checks that are less than tmin
ptrm_checks_included_temps= []
for num, check in enumerate(ptrm_temps):
if check > tmax:
pass
elif ptrm_starting_temps[num] > tmax: # or ptrm_starting_temps[num] < tmin:
pass
else:
ptrm_checks_included_temps.append(check)
return len(ptrm_checks_included_temps), ptrm_checks_included_temps
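# Illustrative example (temperatures are made up): with tmax = 500,
# ptrm_temps = [100, 300, 600] and ptrm_starting_temps = [200, 550, 650],
# only the 100-degree check survives: 600 exceeds tmax, and the 300-degree
# check is dropped because its starting temperature (550) exceeds tmax,
# so the function returns (1, [100]).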
def get_max_ptrm_check(ptrm_checks_included_temps, ptrm_checks_all_temps, ptrm_x, t_Arai, x_Arai):
"""
input: ptrm_checks_included_temps, ptrm_checks_all_temps, ptrm_x, t_Arai, x_Arai.
sorts through included ptrm_checks and finds the largest ptrm check diff,
the sum of the total diffs,
and the percentage of the largest check / original measurement at that temperature step
output: diffs, max_diff, sum_diffs, check_percent, sum_abs_diffs.
"""
if not ptrm_checks_included_temps:
return [], float('nan'), float('nan'), float('nan'), float('nan')
diffs = []
abs_diffs = []
x_Arai_compare = []
ptrm_compare = []
check_percents = []
ptrm_checks_all_temps = list(ptrm_checks_all_temps)
for check in ptrm_checks_included_temps: # goes through each included temperature step
ptrm_ind = ptrm_checks_all_temps.index(check) # indexes the number of the check
ptrm_check = ptrm_x[ptrm_ind] # x value at that temperature step
ptrm_compare.append(ptrm_check) #
arai_ind = t_Arai.index(check)
ptrm_orig = x_Arai[arai_ind]
x_Arai_compare.append(ptrm_orig)
diff = ptrm_orig - ptrm_check
diffs.append(diff)
abs_diffs.append(abs(diff))
if ptrm_orig == 0:
check_percents.append(0)
else:
check_percents.append((old_div(abs(diff), ptrm_orig)) * 100)
max_diff = max(abs_diffs)
check_percent = max(check_percents)
sum_diffs = abs(sum(diffs))
sum_abs_diffs = sum(abs_diffs)
return diffs, max_diff, sum_diffs, check_percent, sum_abs_diffs
def get_delta_CK(max_ptrm_check, x_int):
"""
Input: max_ptrm_check, x intercept.
Output: delta_CK (max ptrm check normed by x intercept)
"""
return abs(old_div(max_ptrm_check, x_int)) * 100.
def get_DRAT(delta_x_prime, delta_y_prime, max_ptrm_check):
"""
Input: TRM length of best fit line (delta_x_prime),
NRM length of best fit line,
max_ptrm_check
Output: DRAT (maximum difference produced by a ptrm check normed by best fit line),
length best fit line
"""
L = numpy.sqrt(delta_x_prime**2 + delta_y_prime**2)
DRAT = (old_div(max_ptrm_check, L)) * 100
return DRAT, L
def get_length_best_fit_line(delta_x_prime, delta_y_prime):
L = numpy.sqrt(delta_x_prime**2 + delta_y_prime**2)
return L
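# Worked example with hypothetical numbers (illustration only): for
# max_ptrm_check = 0.1, x_int = 2.0, delta_x_prime = 1.5, delta_y_prime = 2.0:
#   delta_CK = abs(0.1 / 2.0) * 100     = 5.0
#   L        = sqrt(1.5**2 + 2.0**2)    = 2.5
#   DRAT     = (0.1 / 2.5) * 100        = 4.0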
def get_max_DEV(delta_x_prime, max_ptrm_check):
"""
input: delta_x_prime, max_ptrm_check
output: max_DEV (maximum ptrm check diff normed by TRM line)
"""
return (old_div(max_ptrm_check, delta_x_prime)) * 100.
def get_CDRAT(L, sum_ptrm_checks, sum_abs_ptrm_checks):
"""
input: best_fit line length, sum of ptrm check diffs,
sum of absolute value of ptrm check diffs
output: CDRAT (uses sum of diffs), CDRAT_prime (uses sum of absolute diffs)
"""
CDRAT = (old_div(sum_ptrm_checks, L)) * 100.
CDRAT_prime = (old_div(sum_abs_ptrm_checks, L)) * 100.
return CDRAT, CDRAT_prime
def get_DRATS(sum_ptrm_checks, sum_abs_ptrm_checks, x_Arai, end):
"""
input: sum of ptrm check diffs, sum of absolute value of ptrm check diffs,
x_Arai set of points, end.
output: DRATS (uses sum of diffs), DRATS_prime (uses sum of absolute diffs)
"""
DRATS = (old_div(sum_ptrm_checks, x_Arai[end])) * 100.
DRATS_prime = (old_div(sum_abs_ptrm_checks, x_Arai[end])) * 100.
return DRATS, DRATS_prime
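# Continuing the hypothetical numbers above, with sum_ptrm_checks = 0.15,
# sum_abs_ptrm_checks = 0.25, L = 2.5 and x_Arai[end] = 3.0:
#   CDRAT = (0.15 / 2.5) * 100 = 6.0    CDRAT_prime = (0.25 / 2.5) * 100 = 10.0
#   DRATS = (0.15 / 3.0) * 100 = 5.0    DRATS_prime = (0.25 / 3.0) * 100 ~ 8.3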
def get_mean_DRAT(sum_ptrm_checks, sum_abs_ptrm_checks, n_pTRM, L):
"""
input: sum_ptrm_checks, sum_abs_ptrm_checks, n_pTRM, L
output: mean DRAT (the average difference produced by a pTRM check,
normalized by the length of the best-fit line)
"""
if not n_pTRM:
return float('nan'), float('nan')
mean_DRAT = ((old_div(1., n_pTRM)) * (old_div(sum_ptrm_checks, L))) * 100
mean_DRAT_prime = ((old_div(1., n_pTRM)) * (old_div(sum_abs_ptrm_checks, L))) * 100
return mean_DRAT, mean_DRAT_prime
def get_mean_DEV(sum_ptrm_checks, sum_abs_ptrm_checks, n_pTRM, delta_x_prime):
"""
input: sum_ptrm_checks, sum_abs_ptrm_checks, n_pTRM, delta_x_prime
output: Mean deviation of a pTRM check
"""
if not n_pTRM:
return float('nan'), float('nan')
mean_DEV = ((old_div(1., n_pTRM)) * (old_div(sum_ptrm_checks, delta_x_prime))) * 100
mean_DEV_prime= ((old_div(1., n_pTRM)) * (old_div(sum_abs_ptrm_checks, delta_x_prime))) * 100
return mean_DEV, mean_DEV_prime
def get_delta_pal_vectors(PTRMS, PTRM_Checks, NRM):
""" takes in PTRM data in this format: [temp, dec, inc, moment, ZI or IZ] -- and PTRM_check data in this format: [temp, dec, inc, moment]. Returns them in vector form (cartesian). """
PTRMS = numpy.array(PTRMS)
PTRM_Checks = numpy.array(PTRM_Checks)
TRM_1 = lib_direct.dir2cart(PTRMS[0,1:3])
PTRMS_cart = []
Checks_cart = []
for num, ptrm in enumerate(PTRMS):
ptrm_cart = lib_direct.dir2cart([PTRMS[num][1], PTRMS[num][2], old_div(PTRMS[num][3], NRM)])
PTRMS_cart.append(ptrm_cart)
for num, check in enumerate(PTRM_Checks):
check_cart = lib_direct.dir2cart([PTRM_Checks[num][1], PTRM_Checks[num][2], old_div(PTRM_Checks[num][3], NRM)])
Checks_cart.append(check_cart)
return PTRMS_cart, Checks_cart, TRM_1
def get_diffs(ptrms_vectors, ptrm_checks_vectors, ptrms_orig, checks_orig):
"""
input: ptrms_vectors, ptrm_checks_vectors, ptrms_orig, checks_orig
output: vector diffs between original and ptrm check, C
"""
ptrm_temps = numpy.array(ptrms_orig)[:,0]
check_temps = numpy.array(checks_orig)[:,0]
index = numpy.zeros(len(ptrm_temps))
for num, temp in enumerate(ptrm_temps):
if len(numpy.where(check_temps == temp)[0]):
index[num] = numpy.where(check_temps == temp)[0][0]
else:
index[num] = float('nan')
diffs = numpy.zeros((len(ptrms_vectors), 3))
for num, ptrm in enumerate(ptrms_vectors):
if numpy.isnan(index[num]):
diffs[num] = numpy.array([0,0,0])
else:
diffs[num] = ptrm_checks_vectors[int(index[num])] - ptrm
C = numpy.cumsum(diffs, 0)
#print "diffs (should be same as to_sum"
#print diffs
#print "C (should be same as dpal_sum)"
#print C
return diffs, C
def get_TRM_star(C, ptrms_vectors, start, end):
"""
input: C, ptrms_vectors, start, end
output: TRM_star, x_star (for delta_pal statistic)
"""
TRM_star = numpy.zeros([len(ptrms_vectors), 3])
TRM_star[0] = [0., 0., 0.]
x_star = numpy.zeros(len(ptrms_vectors))
for num, vec in enumerate(ptrms_vectors[1:]):
TRM_star[num+1] = vec + C[num]
# print 'vec', vec
# print 'C', C[num]
for num, trm in enumerate(TRM_star):
x_star[num] = numpy.linalg.norm(trm)
#print "x_star (should match corr_TRM / NRM)"
#print x_star[start:end+1]
return TRM_star[start:end+1], x_star[start:end+1]
def get_b_star(x_star, y_err, y_mean, y_segment):
"""
input: x_star, y_err, y_mean, y_segment
output: b_star (corrected slope for delta_pal statistic)
"""
#print "x_star, should be same as Xcorr / NRM"
#print x_star
x_star_mean = numpy.mean(x_star)
x_err = x_star - x_star_mean
b_star = -1* numpy.sqrt( old_div(sum(numpy.array(y_err)**2), sum(numpy.array(x_err)**2)) ) # averaged slope (first estimate)
#print "y_segment", y_segment
# recomputed below; this signed estimate supersedes the averaged slope above
b_star = numpy.sign(sum(x_err * y_err)) * numpy.std(y_segment, ddof=1) / numpy.std(x_star, ddof=1)
#print "b_star (should be same as corr_slope)"
#print b_star
return b_star
def get_delta_pal(b, b_star):
"""
input: b, b_star (actual and corrected slope)
output: delta_pal
"""
delta_pal = numpy.abs(old_div((b - b_star), b)) * 100
return delta_pal
def get_full_delta_pal(PTRMS, PTRM_Checks, NRM, y_err, y_mean, b, start, end, y_segment):
"""
input: PTRMS, PTRM_Checks, NRM, y_err, y_mean, b, start, end, y_segment
runs full sequence necessary to get delta_pal
"""
#print "-------"
#print "calling get_full_delta_pal in lib"
# return 0
PTRMS_cart, checks, TRM_1 = get_delta_pal_vectors(PTRMS, PTRM_Checks, NRM)
# print "PTRMS_Cart", PTRMS_cart
diffs, C = get_diffs(PTRMS_cart, checks, PTRMS, PTRM_Checks)
# print "C", C
TRM_star, x_star = get_TRM_star(C, PTRMS_cart, start, end)
# print "x_star", x_star
# print type(x_star)
b_star = get_b_star(x_star, y_err, y_mean, y_segment)
delta_pal = get_delta_pal(b, b_star)
return delta_pal
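# Hedged usage sketch (argument values are placeholders, not real data):
#
#   delta_pal = get_full_delta_pal(PTRMS, PTRM_Checks, NRM=1.0,
#                                  y_err=y_err, y_mean=y_mean, b=b,
#                                  start=0, end=len(PTRMS) - 1,
#                                  y_segment=y_segment)
#
# where each PTRMS row is [temp, dec, inc, moment, ZI or IZ] and each
# PTRM_Checks row is [temp, dec, inc, moment], as described in
# get_delta_pal_vectors() above.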
def get_segments(ptrms, ptrm_checks, tmax):
"""
input: ptrms, ptrm_checks, tmax
grabs ptrms that are done below tmax
grabs ptrm checks that are done below tmax AND whose starting temp is below tmax
output: ptrms_included, checks_included
"""
ptrms_included = []
checks_included = []
ptrms = numpy.array(ptrms)
for ptrm in ptrms:
if ptrm[0] <= tmax:
ptrms_included.append(ptrm)
for check in ptrm_checks:
if check[0] <= tmax:
checks_included.append(check)
#print "checks", ptrm_checks
#print "checks_included", checks_included
return ptrms_included, checks_included
"""This platform enables the possibility to control a MQTT alarm."""
import functools
import logging
import re
import voluptuous as vol
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_CUSTOM_BYPASS,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
)
from homeassistant.const import (
CONF_CODE,
CONF_NAME,
CONF_VALUE_TEMPLATE,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMING,
STATE_ALARM_DISARMED,
STATE_ALARM_DISARMING,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from . import (
CONF_COMMAND_TOPIC,
CONF_QOS,
CONF_RETAIN,
CONF_STATE_TOPIC,
DOMAIN,
PLATFORMS,
subscription,
)
from .. import mqtt
from .debug_info import log_messages
from .mixins import MQTT_ENTITY_COMMON_SCHEMA, MqttEntity, async_setup_entry_helper
_LOGGER = logging.getLogger(__name__)
CONF_CODE_ARM_REQUIRED = "code_arm_required"
CONF_CODE_DISARM_REQUIRED = "code_disarm_required"
CONF_PAYLOAD_DISARM = "payload_disarm"
CONF_PAYLOAD_ARM_HOME = "payload_arm_home"
CONF_PAYLOAD_ARM_AWAY = "payload_arm_away"
CONF_PAYLOAD_ARM_NIGHT = "payload_arm_night"
CONF_PAYLOAD_ARM_CUSTOM_BYPASS = "payload_arm_custom_bypass"
CONF_COMMAND_TEMPLATE = "command_template"
DEFAULT_COMMAND_TEMPLATE = "{{action}}"
DEFAULT_ARM_NIGHT = "ARM_NIGHT"
DEFAULT_ARM_AWAY = "ARM_AWAY"
DEFAULT_ARM_HOME = "ARM_HOME"
DEFAULT_ARM_CUSTOM_BYPASS = "ARM_CUSTOM_BYPASS"
DEFAULT_DISARM = "DISARM"
DEFAULT_NAME = "MQTT Alarm"
PLATFORM_SCHEMA = mqtt.MQTT_BASE_PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_CODE): cv.string,
vol.Optional(CONF_CODE_ARM_REQUIRED, default=True): cv.boolean,
vol.Optional(CONF_CODE_DISARM_REQUIRED, default=True): cv.boolean,
vol.Optional(
CONF_COMMAND_TEMPLATE, default=DEFAULT_COMMAND_TEMPLATE
): cv.template,
vol.Required(CONF_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PAYLOAD_ARM_AWAY, default=DEFAULT_ARM_AWAY): cv.string,
vol.Optional(CONF_PAYLOAD_ARM_HOME, default=DEFAULT_ARM_HOME): cv.string,
vol.Optional(CONF_PAYLOAD_ARM_NIGHT, default=DEFAULT_ARM_NIGHT): cv.string,
vol.Optional(
CONF_PAYLOAD_ARM_CUSTOM_BYPASS, default=DEFAULT_ARM_CUSTOM_BYPASS
): cv.string,
vol.Optional(CONF_PAYLOAD_DISARM, default=DEFAULT_DISARM): cv.string,
vol.Optional(CONF_RETAIN, default=mqtt.DEFAULT_RETAIN): cv.boolean,
vol.Required(CONF_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
}
).extend(MQTT_ENTITY_COMMON_SCHEMA.schema)
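# Illustrative configuration.yaml entry matching the schema above (topic
# names and the code value are made up for the example):
#
#   alarm_control_panel:
#     - platform: mqtt
#       name: "MQTT Alarm"
#       state_topic: "home/alarm"
#       command_topic: "home/alarm/set"
#       code: "1234"
#       payload_arm_home: "ARM_HOME"
#       payload_disarm: "DISARM"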
async def async_setup_platform(
hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
):
"""Set up MQTT alarm control panel through configuration.yaml."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
await _async_setup_entity(hass, async_add_entities, config)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up MQTT alarm control panel dynamically through MQTT discovery."""
setup = functools.partial(
_async_setup_entity, hass, async_add_entities, config_entry=config_entry
)
await async_setup_entry_helper(hass, alarm.DOMAIN, setup, PLATFORM_SCHEMA)
async def _async_setup_entity(
hass, async_add_entities, config, config_entry=None, discovery_data=None
):
"""Set up the MQTT Alarm Control Panel platform."""
async_add_entities([MqttAlarm(hass, config, config_entry, discovery_data)])
class MqttAlarm(MqttEntity, alarm.AlarmControlPanelEntity):
"""Representation of a MQTT alarm status."""
def __init__(self, hass, config, config_entry, discovery_data):
"""Init the MQTT Alarm Control Panel."""
self._state = None
MqttEntity.__init__(self, hass, config, config_entry, discovery_data)
@staticmethod
def config_schema():
"""Return the config schema."""
return PLATFORM_SCHEMA
def _setup_from_config(self, config):
value_template = self._config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = self.hass
command_template = self._config[CONF_COMMAND_TEMPLATE]
command_template.hass = self.hass
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
@callback
@log_messages(self.hass, self.entity_id)
def message_received(msg):
"""Run when new MQTT message has been received."""
payload = msg.payload
value_template = self._config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
payload = value_template.async_render_with_possible_json_value(
msg.payload, self._state
)
if payload not in (
STATE_ALARM_DISARMED,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_PENDING,
STATE_ALARM_ARMING,
STATE_ALARM_DISARMING,
STATE_ALARM_TRIGGERED,
):
_LOGGER.warning("Received unexpected payload: %s", msg.payload)
return
self._state = payload
self.async_write_ha_state()
self._sub_state = await subscription.async_subscribe_topics(
self.hass,
self._sub_state,
{
"state_topic": {
"topic": self._config[CONF_STATE_TOPIC],
"msg_callback": message_received,
"qos": self._config[CONF_QOS],
}
},
)
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return (
SUPPORT_ALARM_ARM_HOME
| SUPPORT_ALARM_ARM_AWAY
| SUPPORT_ALARM_ARM_NIGHT
| SUPPORT_ALARM_ARM_CUSTOM_BYPASS
)
@property
def code_format(self):
"""Return one or more digits/characters."""
code = self._config.get(CONF_CODE)
if code is None:
return None
if isinstance(code, str) and re.search("^\\d+$", code):
return alarm.FORMAT_NUMBER
return alarm.FORMAT_TEXT
@property
def code_arm_required(self):
"""Whether the code is required for arm actions."""
code_required = self._config.get(CONF_CODE_ARM_REQUIRED)
return code_required
async def async_alarm_disarm(self, code=None):
"""Send disarm command.
This method is a coroutine.
"""
code_required = self._config[CONF_CODE_DISARM_REQUIRED]
if code_required and not self._validate_code(code, "disarming"):
return
payload = self._config[CONF_PAYLOAD_DISARM]
self._publish(code, payload)
async def async_alarm_arm_home(self, code=None):
"""Send arm home command.
This method is a coroutine.
"""
code_required = self._config[CONF_CODE_ARM_REQUIRED]
if code_required and not self._validate_code(code, "arming home"):
return
action = self._config[CONF_PAYLOAD_ARM_HOME]
self._publish(code, action)
async def async_alarm_arm_away(self, code=None):
"""Send arm away command.
This method is a coroutine.
"""
code_required = self._config[CONF_CODE_ARM_REQUIRED]
if code_required and not self._validate_code(code, "arming away"):
return
action = self._config[CONF_PAYLOAD_ARM_AWAY]
self._publish(code, action)
async def async_alarm_arm_night(self, code=None):
"""Send arm night command.
This method is a coroutine.
"""
code_required = self._config[CONF_CODE_ARM_REQUIRED]
if code_required and not self._validate_code(code, "arming night"):
return
action = self._config[CONF_PAYLOAD_ARM_NIGHT]
self._publish(code, action)
async def async_alarm_arm_custom_bypass(self, code=None):
"""Send arm custom bypass command.
This method is a coroutine.
"""
code_required = self._config[CONF_CODE_ARM_REQUIRED]
if code_required and not self._validate_code(code, "arming custom bypass"):
return
action = self._config[CONF_PAYLOAD_ARM_CUSTOM_BYPASS]
self._publish(code, action)
def _publish(self, code, action):
"""Publish via mqtt."""
command_template = self._config[CONF_COMMAND_TEMPLATE]
values = {"action": action, "code": code}
payload = command_template.async_render(**values, parse_result=False)
mqtt.async_publish(
self.hass,
self._config[CONF_COMMAND_TOPIC],
payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
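# Illustrative example (hypothetical template): with command_template set
# to '{"action": "{{ action }}", "code": "{{ code }}"}', arming away with
# code "1234" renders and publishes
#   {"action": "ARM_AWAY", "code": "1234"}
# to the configured command topic; the default template "{{action}}"
# publishes just the action payload.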
def _validate_code(self, code, state):
"""Validate given code."""
conf_code = self._config.get(CONF_CODE)
check = conf_code is None or code == conf_code
if not check:
_LOGGER.warning("Wrong code entered for %s", state)
return check
from textwrap import dedent
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.util._test_decorators import async_mark
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
TimedeltaIndex,
Timestamp,
)
import pandas._testing as tm
from pandas.core.api import Int64Index
from pandas.core.indexes.datetimes import date_range
test_frame = DataFrame(
{"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)},
index=date_range("1/1/2000", freq="s", periods=40),
)
@async_mark()
@td.check_file_leaks
async def test_tab_complete_ipython6_warning(ip):
from IPython.core.completer import provisionalcompleter
code = dedent(
"""\
import pandas._testing as tm
s = tm.makeTimeSeries()
rs = s.resample("D")
"""
)
await ip.run_code(code)
# GH 31324 newer jedi version raises Deprecation warning;
# appears resolved 2021-02-02
with tm.assert_produces_warning(None):
with provisionalcompleter("ignore"):
list(ip.Completer.completions("rs.", 1))
def test_deferred_with_groupby():
# GH 12486
# support deferred resample ops with groupby
data = [
["2010-01-01", "A", 2],
["2010-01-02", "A", 3],
["2010-01-05", "A", 8],
["2010-01-10", "A", 7],
["2010-01-13", "A", 3],
["2010-01-01", "B", 5],
["2010-01-03", "B", 2],
["2010-01-04", "B", 1],
["2010-01-11", "B", 7],
["2010-01-14", "B", 3],
]
df = DataFrame(data, columns=["date", "id", "score"])
df.date = pd.to_datetime(df.date)
def f(x):
return x.set_index("date").resample("D").asfreq()
expected = df.groupby("id").apply(f)
result = df.set_index("date").groupby("id").resample("D").asfreq()
tm.assert_frame_equal(result, expected)
df = DataFrame(
{
"date": date_range(start="2016-01-01", periods=4, freq="W"),
"group": [1, 1, 2, 2],
"val": [5, 6, 7, 8],
}
).set_index("date")
def f(x):
return x.resample("1D").ffill()
expected = df.groupby("group").apply(f)
result = df.groupby("group").resample("1D").ffill()
tm.assert_frame_equal(result, expected)
def test_getitem():
g = test_frame.groupby("A")
expected = g.B.apply(lambda x: x.resample("2s").mean())
result = g.resample("2s").B.mean()
tm.assert_series_equal(result, expected)
result = g.B.resample("2s").mean()
tm.assert_series_equal(result, expected)
result = g.resample("2s").mean().B
tm.assert_series_equal(result, expected)
def test_getitem_multiple():
# GH 13174
# multiple calls after selection causing an issue with aliasing
data = [{"id": 1, "buyer": "A"}, {"id": 2, "buyer": "B"}]
df = DataFrame(data, index=date_range("2016-01-01", periods=2))
r = df.groupby("id").resample("1D")
result = r["buyer"].count()
expected = Series(
[1, 1],
index=pd.MultiIndex.from_tuples(
[(1, Timestamp("2016-01-01")), (2, Timestamp("2016-01-02"))],
names=["id", None],
),
name="buyer",
)
tm.assert_series_equal(result, expected)
result = r["buyer"].count()
tm.assert_series_equal(result, expected)
def test_groupby_resample_on_api_with_getitem():
# GH 17813
df = DataFrame(
{"id": list("aabbb"), "date": date_range("1-1-2016", periods=5), "data": 1}
)
exp = df.set_index("date").groupby("id").resample("2D")["data"].sum()
result = df.groupby("id").resample("2D", on="date")["data"].sum()
tm.assert_series_equal(result, exp)
def test_groupby_with_origin():
# GH 31809
freq = "1399min" # prime number that is smaller than 24h
start, end = "1/1/2000 00:00:00", "1/31/2000 00:00"
middle = "1/15/2000 00:00:00"
rng = date_range(start, end, freq="1231min") # prime number
ts = Series(np.random.randn(len(rng)), index=rng)
ts2 = ts[middle:end]
# proves that grouper without a fixed origin does not work
# when dealing with unusual frequencies
simple_grouper = pd.Grouper(freq=freq)
count_ts = ts.groupby(simple_grouper).agg("count")
count_ts = count_ts[middle:end]
count_ts2 = ts2.groupby(simple_grouper).agg("count")
with pytest.raises(AssertionError, match="Index are different"):
tm.assert_index_equal(count_ts.index, count_ts2.index)
# test origin on 1970-01-01 00:00:00
origin = Timestamp(0)
adjusted_grouper = pd.Grouper(freq=freq, origin=origin)
adjusted_count_ts = ts.groupby(adjusted_grouper).agg("count")
adjusted_count_ts = adjusted_count_ts[middle:end]
adjusted_count_ts2 = ts2.groupby(adjusted_grouper).agg("count")
tm.assert_series_equal(adjusted_count_ts, adjusted_count_ts2)
# test origin on 2049-10-18 20:00:00
origin_future = Timestamp(0) + pd.Timedelta("1399min") * 30_000
adjusted_grouper2 = pd.Grouper(freq=freq, origin=origin_future)
adjusted2_count_ts = ts.groupby(adjusted_grouper2).agg("count")
adjusted2_count_ts = adjusted2_count_ts[middle:end]
adjusted2_count_ts2 = ts2.groupby(adjusted_grouper2).agg("count")
tm.assert_series_equal(adjusted2_count_ts, adjusted2_count_ts2)
# both groupers use an adjusted timestamp that is a multiple of 1399 min
# they should be equal even if the adjusted_timestamp is in the future
tm.assert_series_equal(adjusted_count_ts, adjusted2_count_ts2)
def test_nearest():
# GH 17496
# Resample nearest
index = date_range("1/1/2000", periods=3, freq="T")
result = Series(range(3), index=index).resample("20s").nearest()
expected = Series(
[0, 0, 1, 1, 1, 2, 2],
index=pd.DatetimeIndex(
[
"2000-01-01 00:00:00",
"2000-01-01 00:00:20",
"2000-01-01 00:00:40",
"2000-01-01 00:01:00",
"2000-01-01 00:01:20",
"2000-01-01 00:01:40",
"2000-01-01 00:02:00",
],
dtype="datetime64[ns]",
freq="20S",
),
)
tm.assert_series_equal(result, expected)
def test_methods():
g = test_frame.groupby("A")
r = g.resample("2s")
for f in ["first", "last", "median", "sem", "sum", "mean", "min", "max"]:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample("2s"), f)())
tm.assert_frame_equal(result, expected)
for f in ["size"]:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample("2s"), f)())
tm.assert_series_equal(result, expected)
for f in ["count"]:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample("2s"), f)())
tm.assert_frame_equal(result, expected)
# series only
for f in ["nunique"]:
result = getattr(r.B, f)()
expected = g.B.apply(lambda x: getattr(x.resample("2s"), f)())
tm.assert_series_equal(result, expected)
for f in ["nearest", "backfill", "ffill", "asfreq"]:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample("2s"), f)())
tm.assert_frame_equal(result, expected)
result = r.ohlc()
expected = g.apply(lambda x: x.resample("2s").ohlc())
tm.assert_frame_equal(result, expected)
for f in ["std", "var"]:
result = getattr(r, f)(ddof=1)
expected = g.apply(lambda x: getattr(x.resample("2s"), f)(ddof=1))
tm.assert_frame_equal(result, expected)
def test_apply():
g = test_frame.groupby("A")
r = g.resample("2s")
# reduction
expected = g.resample("2s").sum()
def f(x):
return x.resample("2s").sum()
result = r.apply(f)
tm.assert_frame_equal(result, expected)
def f(x):
return x.resample("2s").apply(lambda y: y.sum())
result = g.apply(f)
# y.sum() results in int64 instead of int32 on 32-bit architectures
expected = expected.astype("int64")
tm.assert_frame_equal(result, expected)
def test_apply_with_mutated_index():
# GH 15169
index = date_range("1-1-2015", "12-31-15", freq="D")
df = DataFrame(data={"col1": np.random.rand(len(index))}, index=index)
def f(x):
s = Series([1, 2], index=["a", "b"])
return s
expected = df.groupby(pd.Grouper(freq="M")).apply(f)
result = df.resample("M").apply(f)
tm.assert_frame_equal(result, expected)
# A case for series
expected = df["col1"].groupby(pd.Grouper(freq="M")).apply(f)
result = df["col1"].resample("M").apply(f)
tm.assert_series_equal(result, expected)
def test_apply_columns_multilevel():
# GH 16231
cols = pd.MultiIndex.from_tuples([("A", "a", "", "one"), ("B", "b", "i", "two")])
ind = date_range(start="2017-01-01", freq="15Min", periods=8)
df = DataFrame(np.array([0] * 16).reshape(8, 2), index=ind, columns=cols)
agg_dict = {col: (np.sum if col[3] == "one" else np.mean) for col in df.columns}
result = df.resample("H").apply(lambda x: agg_dict[x.name](x))
expected = DataFrame(
2 * [[0, 0.0]],
index=date_range(start="2017-01-01", freq="1H", periods=2),
columns=pd.MultiIndex.from_tuples(
[("A", "a", "", "one"), ("B", "b", "i", "two")]
),
)
tm.assert_frame_equal(result, expected)
def test_resample_groupby_with_label():
# GH 13235
index = date_range("2000-01-01", freq="2D", periods=5)
df = DataFrame(index=index, data={"col0": [0, 0, 1, 1, 2], "col1": [1, 1, 1, 1, 1]})
result = df.groupby("col0").resample("1W", label="left").sum()
mi = [
np.array([0, 0, 1, 2]),
pd.to_datetime(
np.array(["1999-12-26", "2000-01-02", "2000-01-02", "2000-01-02"])
),
]
mindex = pd.MultiIndex.from_arrays(mi, names=["col0", None])
expected = DataFrame(
data={"col0": [0, 0, 2, 2], "col1": [1, 1, 2, 1]}, index=mindex
)
tm.assert_frame_equal(result, expected)
def test_consistency_with_window():
# consistent return values with window
df = test_frame
expected = Int64Index([1, 2, 3], name="A")
result = df.groupby("A").resample("2s").mean()
assert result.index.nlevels == 2
tm.assert_index_equal(result.index.levels[0], expected)
result = df.groupby("A").rolling(20).mean()
assert result.index.nlevels == 2
tm.assert_index_equal(result.index.levels[0], expected)
def test_median_duplicate_columns():
# GH 14233
df = DataFrame(
np.random.randn(20, 3),
columns=list("aaa"),
index=date_range("2012-01-01", periods=20, freq="s"),
)
df2 = df.copy()
df2.columns = ["a", "b", "c"]
expected = df2.resample("5s").median()
result = df.resample("5s").median()
expected.columns = result.columns
tm.assert_frame_equal(result, expected)
def test_apply_to_one_column_of_df():
# GH: 36951
df = DataFrame(
{"col": range(10), "col1": range(10, 20)},
index=date_range("2012-01-01", periods=10, freq="20min"),
)
# access "col" via getattr -> make sure we handle AttributeError
result = df.resample("H").apply(lambda group: group.col.sum())
expected = Series(
[3, 12, 21, 9], index=date_range("2012-01-01", periods=4, freq="H")
)
tm.assert_series_equal(result, expected)
# access "col" via __getitem__ -> make sure we handle KeyError
result = df.resample("H").apply(lambda group: group["col"].sum())
tm.assert_series_equal(result, expected)
def test_resample_groupby_agg():
# GH: 33548
df = DataFrame(
{
"cat": [
"cat_1",
"cat_1",
"cat_2",
"cat_1",
"cat_2",
"cat_1",
"cat_2",
"cat_1",
],
"num": [5, 20, 22, 3, 4, 30, 10, 50],
"date": [
"2019-2-1",
"2018-02-03",
"2020-3-11",
"2019-2-2",
"2019-2-2",
"2018-12-4",
"2020-3-11",
"2020-12-12",
],
}
)
df["date"] = pd.to_datetime(df["date"])
resampled = df.groupby("cat").resample("Y", on="date")
expected = resampled.sum()
result = resampled.agg({"num": "sum"})
tm.assert_frame_equal(result, expected)
def test_resample_groupby_agg_listlike():
# GH 42905
ts = Timestamp("2021-02-28 00:00:00")
df = DataFrame({"class": ["beta"], "value": [69]}, index=Index([ts], name="date"))
resampled = df.groupby("class").resample("M")["value"]
result = resampled.agg(["sum", "size"])
expected = DataFrame(
[[69, 1]],
index=pd.MultiIndex.from_tuples([("beta", ts)], names=["class", "date"]),
columns=["sum", "size"],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("keys", [["a"], ["a", "b"]])
def test_empty(keys):
# GH 26411
df = DataFrame([], columns=["a", "b"], index=TimedeltaIndex([]))
result = df.groupby(keys).resample(rule=pd.to_timedelta("00:00:01")).mean()
expected = DataFrame(columns=["a", "b"]).set_index(keys, drop=False)
if len(keys) == 1:
expected.index.name = keys[0]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("consolidate", [True, False])
def test_resample_groupby_agg_object_dtype_all_nan(consolidate):
# https://github.com/pandas-dev/pandas/issues/39329
dates = date_range("2020-01-01", periods=15, freq="D")
df1 = DataFrame({"key": "A", "date": dates, "col1": range(15), "col_object": "val"})
df2 = DataFrame({"key": "B", "date": dates, "col1": range(15)})
df = pd.concat([df1, df2], ignore_index=True)
if consolidate:
df = df._consolidate()
result = df.groupby(["key"]).resample("W", on="date").min()
idx = pd.MultiIndex.from_arrays(
[
["A"] * 3 + ["B"] * 3,
pd.to_datetime(["2020-01-05", "2020-01-12", "2020-01-19"] * 2),
],
names=["key", "date"],
)
expected = DataFrame(
{
"key": ["A"] * 3 + ["B"] * 3,
"date": pd.to_datetime(["2020-01-01", "2020-01-06", "2020-01-13"] * 2),
"col1": [0, 5, 12] * 2,
"col_object": ["val"] * 3 + [np.nan] * 3,
},
index=idx,
)
tm.assert_frame_equal(result, expected)
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import socket
import netaddr
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.common import utils as n_utils
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer.agent import agent_device_driver
from neutron.services.loadbalancer import constants as lb_const
from neutron.services.loadbalancer.drivers.haproxy import cfg as hacfg
LOG = logging.getLogger(__name__)
NS_PREFIX = 'qlbaas-'
DRIVER_NAME = 'haproxy_ns'
IN_BYTES = 0
OUT_BYTES = 1
STATE_PATH_DEFAULT = '$state_path/lbaas'
USER_GROUP_DEFAULT = 'nogroup'
OPTS = [
cfg.StrOpt(
'loadbalancer_state_path',
default=STATE_PATH_DEFAULT,
help=_('Location to store config and state files'),
deprecated_opts=[cfg.DeprecatedOpt('loadbalancer_state_path',
group='DEFAULT')],
),
cfg.StrOpt(
'user_group',
default=USER_GROUP_DEFAULT,
help=_('The user group'),
deprecated_opts=[cfg.DeprecatedOpt('user_group', group='DEFAULT')],
),
cfg.IntOpt(
'send_gratuitous_arp',
default=3,
help=_('When the same vip is deleted and re-added, send this many '
'gratuitous ARPs to flush the ARP cache in the router. '
'Set it to 0 or less to disable this feature.'),
)
]
cfg.CONF.register_opts(OPTS, 'haproxy')
class StatsCache(object):
def __init__(self):
self.stats = {}
def put(self, pool_id):
if pool_id not in self.stats:
self.stats[pool_id] = [True, [0, 0]]
else:
self.stats[pool_id][0] = True
def remove(self, pool_id):
del self.stats[pool_id]
def set_stats(self, pool_id, stats):
last_stats = self._get_last_stats(pool_id)
self._set_last_stats(pool_id, stats)
stats[lb_const.STATS_IN_BYTES] = int(stats[lb_const.STATS_IN_BYTES])
stats[lb_const.STATS_OUT_BYTES] = int(stats[lb_const.STATS_OUT_BYTES])
if not self._isreboot(pool_id):
stats[lb_const.STATS_IN_BYTES] -= last_stats[lb_const.STATS_IN_BYTES]
stats[lb_const.STATS_OUT_BYTES] -= last_stats[lb_const.STATS_OUT_BYTES]
def _get_last_stats(self, pool_id):
last_stats = {}
last_stats[lb_const.STATS_IN_BYTES] = self.stats[pool_id][1][IN_BYTES]
last_stats[lb_const.STATS_OUT_BYTES] = self.stats[pool_id][1][OUT_BYTES]
return last_stats
def _set_last_stats(self, pool_id, stats):
self.stats[pool_id][1][IN_BYTES] = int(stats[lb_const.STATS_IN_BYTES])
self.stats[pool_id][1][OUT_BYTES] = int(stats[lb_const.STATS_OUT_BYTES])
def _isreboot(self, pool_id):
ret = self.stats[pool_id][0]
self.stats[pool_id][0] = False
return ret
class HaproxyNSDriver(agent_device_driver.AgentDeviceDriver):
def __init__(self, conf, plugin_rpc):
self.conf = conf
self.root_helper = config.get_root_helper(conf)
self.state_path = conf.haproxy.loadbalancer_state_path
try:
vif_driver = importutils.import_object(conf.interface_driver, conf)
except ImportError:
with excutils.save_and_reraise_exception():
msg = (_('Error importing interface driver: %s')
% conf.interface_driver)
LOG.error(msg)
self.vif_driver = vif_driver
self.plugin_rpc = plugin_rpc
self.pool_to_port_id = {}
self.stats_cache = StatsCache()
@classmethod
def get_name(cls):
return DRIVER_NAME
def create(self, logical_config):
pool_id = logical_config['pool']['id']
namespace = get_ns_name(pool_id)
self._plug(namespace, logical_config['vip']['port'])
self._spawn(logical_config)
def update(self, logical_config):
pool_id = logical_config['pool']['id']
pid_path = self._get_state_file_path(pool_id, 'pid')
extra_args = ['-sf']
extra_args.extend(p.strip() for p in open(pid_path, 'r'))
self._spawn(logical_config, extra_args)
def _spawn(self, logical_config, extra_cmd_args=()):
pool_id = logical_config['pool']['id']
namespace = get_ns_name(pool_id)
conf_path = self._get_state_file_path(pool_id, 'conf')
pid_path = self._get_state_file_path(pool_id, 'pid')
sock_path = self._get_state_file_path(pool_id, 'sock')
user_group = self.conf.haproxy.user_group
hacfg.save_config(conf_path, logical_config, sock_path, user_group)
cmd = ['haproxy', '-f', conf_path, '-p', pid_path]
cmd.extend(extra_cmd_args)
ns = ip_lib.IPWrapper(self.root_helper, namespace)
ns.netns.execute(cmd)
# remember the pool<>port mapping
self.pool_to_port_id[pool_id] = logical_config['vip']['port']['id']
@n_utils.synchronized('haproxy-driver')
def undeploy_instance(self, pool_id, cleanup_namespace=False):
namespace = get_ns_name(pool_id)
ns = ip_lib.IPWrapper(self.root_helper, namespace)
pid_path = self._get_state_file_path(pool_id, 'pid')
# kill the process
kill_pids_in_file(self.root_helper, pid_path)
# unplug the ports
if pool_id in self.pool_to_port_id:
self._unplug(namespace, self.pool_to_port_id[pool_id])
# delete all devices from namespace;
# used when deleting orphans and port_id is not known for pool_id
if cleanup_namespace:
for device in ns.get_devices(exclude_loopback=True):
self.vif_driver.unplug(device.name, namespace=namespace)
# remove the configuration directory
conf_dir = os.path.dirname(self._get_state_file_path(pool_id, ''))
if os.path.isdir(conf_dir):
shutil.rmtree(conf_dir)
ns.garbage_collect_namespace()
self.stats_cache.remove(pool_id)
def exists(self, pool_id):
namespace = get_ns_name(pool_id)
root_ns = ip_lib.IPWrapper(self.root_helper)
socket_path = self._get_state_file_path(pool_id, 'sock', False)
if root_ns.netns.exists(namespace) and os.path.exists(socket_path):
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(socket_path)
return True
except socket.error:
pass
return False
def check_process(self, pool_id):
pid_path = self._get_state_file_path(pool_id, 'pid')
try:
pid = [p.strip() for p in open(pid_path, 'r')]
return os.path.exists('/proc/' + str(pid[0]))
except IOError:
return False
def get_stats(self, pool_id):
socket_path = self._get_state_file_path(pool_id, 'sock', False)
TYPE_BACKEND_REQUEST = 2
TYPE_SERVER_REQUEST = 4
if os.path.exists(socket_path):
parsed_stats = self._get_stats_from_socket(
socket_path,
entity_type=TYPE_BACKEND_REQUEST | TYPE_SERVER_REQUEST)
pool_stats = self._get_backend_stats(parsed_stats)
pool_stats['members'] = self._get_servers_stats(parsed_stats)
return pool_stats
else:
LOG.warn(_('Stats socket not found for pool %s'), pool_id)
return {}
def _get_backend_stats(self, parsed_stats):
TYPE_BACKEND_RESPONSE = '1'
for stats in parsed_stats:
if stats.get('type') == TYPE_BACKEND_RESPONSE:
unified_stats = dict((k, stats.get(v, ''))
for k, v in hacfg.STATS_MAP.items())
return unified_stats
return {}
def _get_servers_stats(self, parsed_stats):
TYPE_SERVER_RESPONSE = '2'
res = {}
for stats in parsed_stats:
if stats.get('type') == TYPE_SERVER_RESPONSE:
res[stats['svname']] = {
lb_const.STATS_STATUS: (constants.INACTIVE
if stats['status'] == 'DOWN'
else constants.ACTIVE),
lb_const.STATS_HEALTH: stats['check_status'],
lb_const.STATS_FAILED_CHECKS: stats['chkfail']
}
return res
def _get_stats_from_socket(self, socket_path, entity_type):
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(socket_path)
s.send('show stat -1 %s -1\n' % entity_type)
raw_stats = ''
chunk_size = 1024
while True:
chunk = s.recv(chunk_size)
raw_stats += chunk
if len(chunk) < chunk_size:
break
return self._parse_stats(raw_stats)
except socket.error as e:
LOG.warn(_('Error while connecting to stats socket: %s'), e)
return {}
def _parse_stats(self, raw_stats):
stat_lines = raw_stats.splitlines()
if len(stat_lines) < 2:
return []
stat_names = [name.strip('# ') for name in stat_lines[0].split(',')]
res_stats = []
for raw_values in stat_lines[1:]:
if not raw_values:
continue
stat_values = [value.strip() for value in raw_values.split(',')]
res_stats.append(dict(zip(stat_names, stat_values)))
return res_stats
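# Illustrative "show stat" output this parser expects (abridged; a real
# haproxy response contains many more columns):
#
#   # pxname,svname,qcur,...,status,...,chkfail,...,type,...,check_status,...
#   pool-backend,BACKEND,0,...,UP,...,0,...,1,...,,...
#   pool-backend,member-1,0,...,UP,...,0,...,2,...,L4OK,...
#
# _parse_stats() strips the leading '# ' from the header line, then zips the
# header names with each data row into dicts such as
# {'svname': 'member-1', 'status': 'UP', 'type': '2', 'check_status': 'L4OK'}.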
# NOTE: stub; superseded by the concrete remove_orphans() implementation below.
def remove_orphans(self, known_pool_ids):
raise NotImplementedError()
def _get_state_file_path(self, pool_id, kind, ensure_state_dir=True):
"""Returns the file name for a given kind of config file."""
confs_dir = os.path.abspath(os.path.normpath(self.state_path))
conf_dir = os.path.join(confs_dir, pool_id)
if ensure_state_dir:
if not os.path.isdir(conf_dir):
os.makedirs(conf_dir, 0o755)
return os.path.join(conf_dir, kind)
def _plug(self, namespace, port, reuse_existing=True):
self.plugin_rpc.plug_vip_port(port['id'])
interface_name = self.vif_driver.get_device_name(Wrap(port))
if ip_lib.device_exists(interface_name, self.root_helper, namespace):
if not reuse_existing:
raise exceptions.PreexistingDeviceFailure(
dev_name=interface_name
)
else:
self.vif_driver.plug(
port['network_id'],
port['id'],
interface_name,
port['mac_address'],
namespace=namespace
)
cidrs = [
'%s/%s' % (ip['ip_address'],
netaddr.IPNetwork(ip['subnet']['cidr']).prefixlen)
for ip in port['fixed_ips']
]
self.vif_driver.init_l3(interface_name, cidrs, namespace=namespace)
gw_ip = port['fixed_ips'][0]['subnet'].get('gateway_ip')
if not gw_ip:
host_routes = port['fixed_ips'][0]['subnet'].get('host_routes', [])
for host_route in host_routes:
if host_route['destination'] == "0.0.0.0/0":
gw_ip = host_route['nexthop']
break
if gw_ip:
cmd = ['route', 'add', 'default', 'gw', gw_ip]
ip_wrapper = ip_lib.IPWrapper(self.root_helper,
namespace=namespace)
ip_wrapper.netns.execute(cmd, check_exit_code=False)
# When the same vip is deleted and re-added, we need to send
# gratuitous ARP to flush the ARP cache in the router.
gratuitous_arp = self.conf.haproxy.send_gratuitous_arp
if gratuitous_arp > 0:
for ip in port['fixed_ips']:
cmd_arping = ['arping', '-U',
'-I', interface_name,
'-c', gratuitous_arp,
ip['ip_address']]
ip_wrapper.netns.execute(cmd_arping, check_exit_code=False)
def _unplug(self, namespace, port_id):
port_stub = {'id': port_id}
self.plugin_rpc.unplug_vip_port(port_id)
interface_name = self.vif_driver.get_device_name(Wrap(port_stub))
self.vif_driver.unplug(interface_name, namespace=namespace)
@n_utils.synchronized('haproxy-driver')
def deploy_instance(self, logical_config):
# do actual deploy only if vip and pool are configured and active
if (not logical_config or
'vip' not in logical_config or
(logical_config['vip']['status'] not in
constants.ACTIVE_PENDING_STATUSES) or
not logical_config['vip']['admin_state_up'] or
(logical_config['pool']['status'] not in
constants.ACTIVE_PENDING_STATUSES) or
not logical_config['pool']['admin_state_up']):
return
if self.check_process(logical_config['pool']['id']):
self.update(logical_config)
else:
self.create(logical_config)
self.stats_cache.put(logical_config['pool']['id'])
def _refresh_device(self, pool_id):
logical_config = self.plugin_rpc.get_logical_device(pool_id)
self.deploy_instance(logical_config)
def create_vip(self, vip):
self._refresh_device(vip['pool_id'])
def update_vip(self, old_vip, vip):
self._refresh_device(vip['pool_id'])
def delete_vip(self, vip):
self.undeploy_instance(vip['pool_id'])
def create_listener(self, pool_id):
self._refresh_device(pool_id)
def delete_listener(self, pool_id):
self._refresh_device(pool_id)
def create_pool(self, pool):
if pool['vip_id']:
self._refresh_device(pool['id'])
def update_pool(self, old_pool, pool):
self._refresh_device(pool['id'])
def delete_pool(self, pool):
# delete_pool may be called before vip deletion in case
# pool's admin state set to down
if self.exists(pool['id']):
self.undeploy_instance(pool['id'])
def create_member(self, member):
self._refresh_device(member['pool_id'])
def update_member(self, old_member, member):
self._refresh_device(member['pool_id'])
def delete_member(self, member):
self._refresh_device(member['pool_id'])
def create_pool_health_monitor(self, health_monitor, pool_id):
self._refresh_device(pool_id)
def update_pool_health_monitor(self, old_health_monitor, health_monitor,
pool_id):
self._refresh_device(pool_id)
def delete_pool_health_monitor(self, health_monitor, pool_id):
self._refresh_device(pool_id)
def remove_orphans(self, known_pool_ids):
if not os.path.exists(self.state_path):
return
orphans = (pool_id for pool_id in os.listdir(self.state_path)
if pool_id not in known_pool_ids)
for pool_id in orphans:
if self.exists(pool_id):
self.undeploy_instance(pool_id, cleanup_namespace=True)
# NOTE (markmcclain) For compliance with interface.py which expects objects
class Wrap(object):
"""A light attribute wrapper for compatibility with the interface lib."""
def __init__(self, d):
self.__dict__.update(d)
def __getitem__(self, key):
return self.__dict__[key]
def get_ns_name(namespace_id):
return NS_PREFIX + namespace_id
def kill_pids_in_file(root_helper, pid_path):
if os.path.exists(pid_path):
with open(pid_path, 'r') as pids:
for pid in pids:
pid = pid.strip()
try:
utils.execute(['kill', '-9', pid], root_helper)
except RuntimeError:
LOG.exception(
_('Unable to kill haproxy process: %s'),
pid
)
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'AnswerAttachement'
db.create_table('website_answerattachement', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('answer_reference', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.AnswerReference'], null=True, blank=True)),
('file_upload', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True, blank=True)),
('file_name', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
))
db.send_create_signal('website', ['AnswerAttachement'])
def backwards(self, orm):
# Deleting model 'AnswerAttachement'
db.delete_table('website_answerattachement')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'website.action': {
'Meta': {'object_name': 'Action'},
'action_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ActionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.actioncategory': {
'Meta': {'object_name': 'ActionCategory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.address': {
'Meta': {'object_name': 'Address'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'website.answerattachement': {
'Meta': {'object_name': 'AnswerAttachement'},
'answer_reference': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerReference']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'file_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'website.answerchoice': {
'Meta': {'object_name': 'AnswerChoice'},
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']"}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'website.answerchoicegroup': {
'Meta': {'object_name': 'AnswerChoiceGroup'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.answerreference': {
'Meta': {'object_name': 'AnswerReference'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_callout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'migrated_answer_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicability': {
'Meta': {'object_name': 'Applicability'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.comment': {
'Meta': {'object_name': 'Comment'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comment_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_reference'", 'null': 'True', 'to': "orm['website.Comment']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.entityview': {
'Meta': {'object_name': 'EntityView'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.entityviewcount': {
'Meta': {'object_name': 'EntityViewCount'},
'count_30_days': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'total_count': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.jurisdiction': {
'Meta': {'object_name': 'Jurisdiction'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'last_contributed': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'last_contributed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'last_contributed_by_org': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_contributor'", 'null': 'True', 'to': "orm['website.Organization']"}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'name_for_url': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_jurisdiction'", 'null': 'True', 'to': "orm['website.Organization']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_jurisdiction'", 'null': 'True', 'to': "orm['website.Jurisdiction']"}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.jurisdictioncontributor': {
'Meta': {'object_name': 'JurisdictionContributor'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.migrationhistory': {
'Meta': {'object_name': 'MigrationHistory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes2': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'source_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'source_table': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'target_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'target_table': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organization': {
'Meta': {'object_name': 'Organization'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.OrganizationCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'fax': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'logo_scaled': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'parent_org': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'website.organizationaddress': {
'Meta': {'object_name': 'OrganizationAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'})
},
'website.organizationcategory': {
'Meta': {'object_name': 'OrganizationCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organizationmember': {
'Meta': {'object_name': 'OrganizationMember'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitation_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'invitor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_invitor'", 'null': 'True', 'to': "orm['auth.User']"}),
'join_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'requested_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RoleType']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_user'", 'null': 'True', 'to': "orm['auth.User']"})
},
'website.organizationrating': {
'Meta': {'object_name': 'OrganizationRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.question': {
'Meta': {'object_name': 'Question'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']", 'null': 'True', 'blank': 'True'}),
'applicability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Applicability']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'default_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'display_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field_attributes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field_suffix': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'form_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'has_multivalues': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instruction': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'js': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'migration_type': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'qtemplate': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']", 'null': 'True'}),
'question': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'state_exclusive': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'terminology': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'validation_class': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.questioncategory': {
'Meta': {'object_name': 'QuestionCategory'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.ratingcategory': {
'Meta': {'object_name': 'RatingCategory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rating_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.ratinglevel': {
'Meta': {'object_name': 'RatingLevel'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.reaction': {
'Meta': {'object_name': 'Reaction'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Action']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ReactionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'reaction_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.reactioncategory': {
'Meta': {'object_name': 'ReactionCategory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.rewardcategory': {
'Meta': {'object_name': 'RewardCategory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.roletype': {
'Meta': {'object_name': 'RoleType'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.servervariable': {
'Meta': {'object_name': 'ServerVariable'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.template': {
'Meta': {'object_name': 'Template'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'})
},
'website.templatequestion': {
'Meta': {'object_name': 'TemplateQuestion'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"})
},
'website.usercommentview': {
'Meta': {'object_name': 'UserCommentView'},
'comments_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'last_comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Comment']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'view_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'website.userdetail': {
'Meta': {'object_name': 'UserDetail'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_preference': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'migrated_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'notification_preference': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'old_password': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'reset_password_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '124', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userfavorite': {
'Meta': {'object_name': 'UserFavorite'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userpageview': {
'Meta': {'object_name': 'UserPageView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_page_view_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '210', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userrating': {
'Meta': {'object_name': 'UserRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.usersearch': {
'Meta': {'object_name': 'UserSearch'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'search_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.view': {
'Meta': {'object_name': 'View'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'view_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'})
},
'website.vieworgs': {
'Meta': {'object_name': 'ViewOrgs'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']"}),
'view': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.View']"})
},
'website.viewquestions': {
'Meta': {'object_name': 'ViewQuestions'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'view': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.View']"})
},
'website.zipcode': {
'Meta': {'object_name': 'Zipcode'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '2', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'})
}
}
complete_apps = ['website']
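# Illustrative note (not part of the generated migration): the frozen "models"
# dict above is what South uses to reconstruct the ORM state at this point in
# the migration history. With a standard South setup it is applied with:
#
#     python manage.py migrate website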
|
|
""" BFD protocol implementation """
from random import randint
from socket import AF_INET, AF_INET6, inet_pton
from scapy.all import bind_layers
from scapy.layers.inet import UDP
from scapy.packet import Packet
from scapy.fields import BitField, BitEnumField, XByteField, FlagsField,\
ConditionalField, StrField
from vpp_object import VppObject
from util import NumericConstant
from vpp_papi import VppEnum
class BFDDiagCode(NumericConstant):
""" BFD Diagnostic Code """
no_diagnostic = 0
control_detection_time_expired = 1
echo_function_failed = 2
neighbor_signaled_session_down = 3
forwarding_plane_reset = 4
path_down = 5
concatenated_path_down = 6
administratively_down = 7
reverse_concatenated_path_down = 8
desc_dict = {
no_diagnostic: "No diagnostic",
control_detection_time_expired: "Control Detection Time Expired",
echo_function_failed: "Echo Function Failed",
neighbor_signaled_session_down: "Neighbor Signaled Session Down",
forwarding_plane_reset: "Forwarding Plane Reset",
path_down: "Path Down",
concatenated_path_down: "Concatenated Path Down",
administratively_down: "Administratively Down",
reverse_concatenated_path_down: "Reverse Concatenated Path Down",
}
class BFDState(NumericConstant):
""" BFD State """
admin_down = 0
down = 1
init = 2
up = 3
desc_dict = {
admin_down: "AdminDown",
down: "Down",
init: "Init",
up: "Up",
}
class BFDAuthType(NumericConstant):
""" BFD Authentication Type """
no_auth = 0
simple_pwd = 1
keyed_md5 = 2
meticulous_keyed_md5 = 3
keyed_sha1 = 4
meticulous_keyed_sha1 = 5
desc_dict = {
no_auth: "No authentication",
simple_pwd: "Simple Password",
keyed_md5: "Keyed MD5",
meticulous_keyed_md5: "Meticulous Keyed MD5",
keyed_sha1: "Keyed SHA1",
meticulous_keyed_sha1: "Meticulous Keyed SHA1",
}
def bfd_is_auth_used(pkt):
""" is packet authenticated? """
return "A" in pkt.sprintf("%BFD.flags%")
def bfd_is_simple_pwd_used(pkt):
""" is simple password authentication used? """
return bfd_is_auth_used(pkt) and pkt.auth_type == BFDAuthType.simple_pwd
def bfd_is_sha1_used(pkt):
""" is sha1 authentication used? """
return bfd_is_auth_used(pkt) and pkt.auth_type in \
(BFDAuthType.keyed_sha1, BFDAuthType.meticulous_keyed_sha1)
def bfd_is_md5_used(pkt):
""" is md5 authentication used? """
return bfd_is_auth_used(pkt) and pkt.auth_type in \
(BFDAuthType.keyed_md5, BFDAuthType.meticulous_keyed_md5)
def bfd_is_md5_or_sha1_used(pkt):
""" is md5 or sha1 used? """
return bfd_is_md5_used(pkt) or bfd_is_sha1_used(pkt)
class BFD(Packet):
""" BFD protocol layer for scapy """
    udp_dport = 3784  #: BFD destination port per RFC 5881
    udp_dport_echo = 3785  #: BFD destination port for ECHO per RFC 5881
    udp_sport_min = 49152  #: BFD source port min value per RFC 5881
    udp_sport_max = 65535  #: BFD source port max value per RFC 5881
    bfd_pkt_len = 24  #: length of BFD pkt without authentication section
    sha1_auth_len = 28  #: length of authentication section if SHA1 used
name = "BFD"
fields_desc = [
BitField("version", 1, 3),
BitEnumField("diag", 0, 5, BFDDiagCode.desc_dict),
BitEnumField("state", 0, 2, BFDState.desc_dict),
FlagsField("flags", 0, 6, ['M', 'D', 'A', 'C', 'F', 'P']),
XByteField("detect_mult", 0),
BitField("length", bfd_pkt_len, 8),
BitField("my_discriminator", 0, 32),
BitField("your_discriminator", 0, 32),
BitField("desired_min_tx_interval", 0, 32),
BitField("required_min_rx_interval", 0, 32),
BitField("required_min_echo_rx_interval", 0, 32),
ConditionalField(
BitEnumField("auth_type", 0, 8, BFDAuthType.desc_dict),
bfd_is_auth_used),
ConditionalField(BitField("auth_len", 0, 8), bfd_is_auth_used),
ConditionalField(BitField("auth_key_id", 0, 8), bfd_is_auth_used),
ConditionalField(BitField("auth_reserved", 0, 8),
bfd_is_md5_or_sha1_used),
ConditionalField(
BitField("auth_seq_num", 0, 32), bfd_is_md5_or_sha1_used),
ConditionalField(StrField("auth_key_hash", "0" * 16), bfd_is_md5_used),
ConditionalField(
StrField("auth_key_hash", "0" * 20), bfd_is_sha1_used),
]
def mysummary(self):
return self.sprintf("BFD(my_disc=%BFD.my_discriminator%,"
"your_disc=%BFD.your_discriminator%)")
# glue the BFD packet class to scapy parser
bind_layers(UDP, BFD, dport=BFD.udp_dport)
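# Illustrative sketch (not part of the original module): building a BFD control
# packet with the layer defined above and checking it with the auth helpers.
# The field values are arbitrary examples; nothing here is sent on the wire.
def _example_bfd_control_packet():
    """ Return a sample UDP/BFD control packet (illustration only) """
    bfd = BFD(my_discriminator=1, your_discriminator=2,
              state=BFDState.up, detect_mult=3,
              desired_min_tx_interval=100000,
              required_min_rx_interval=100000)
    pkt = UDP(sport=BFD.udp_sport_min, dport=BFD.udp_dport) / bfd
    # no "A" flag was set above, so the authentication helpers must not match
    assert not bfd_is_auth_used(pkt[BFD])
    return pkt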
class BFD_vpp_echo(Packet):
""" BFD echo packet as used by VPP (non-rfc, as rfc doesn't define one) """
udp_dport = 3785 #: BFD echo destination port per RFC 5881
name = "BFD_VPP_ECHO"
fields_desc = [
BitField("discriminator", 0, 32),
BitField("expire_time_clocks", 0, 64),
BitField("checksum", 0, 64)
]
def mysummary(self):
return self.sprintf(
"BFD_VPP_ECHO(disc=%BFD_VPP_ECHO.discriminator%,"
"expire_time_clocks=%BFD_VPP_ECHO.expire_time_clocks%)")
# glue the BFD echo packet class to scapy parser
bind_layers(UDP, BFD_vpp_echo, dport=BFD_vpp_echo.udp_dport)
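# Illustrative only: a VPP echo packet is built the same way, e.g.
#     UDP(dport=BFD_vpp_echo.udp_dport) / BFD_vpp_echo(discriminator=1)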
class VppBFDAuthKey(VppObject):
""" Represents BFD authentication key in VPP """
def __init__(self, test, conf_key_id, auth_type, key):
self._test = test
self._key = key
self._auth_type = auth_type
test.assertIn(auth_type, BFDAuthType.desc_dict)
self._conf_key_id = conf_key_id
@property
def test(self):
""" Test which created this key """
return self._test
@property
def auth_type(self):
""" Authentication type for this key """
return self._auth_type
@property
def key(self):
""" key data """
return self._key
@key.setter
def key(self, value):
self._key = value
@property
def conf_key_id(self):
""" configuration key ID """
return self._conf_key_id
def add_vpp_config(self):
self.test.vapi.bfd_auth_set_key(
conf_key_id=self._conf_key_id, auth_type=self._auth_type,
key=self._key, key_len=len(self._key))
self._test.registry.register(self, self.test.logger)
def get_bfd_auth_keys_dump_entry(self):
""" get the entry in the auth keys dump corresponding to this key """
result = self.test.vapi.bfd_auth_keys_dump()
for k in result:
if k.conf_key_id == self._conf_key_id:
return k
return None
def query_vpp_config(self):
return self.get_bfd_auth_keys_dump_entry() is not None
def remove_vpp_config(self):
self.test.vapi.bfd_auth_del_key(conf_key_id=self._conf_key_id)
def object_id(self):
return "bfd-auth-key-%s" % self._conf_key_id
class VppBFDUDPSession(VppObject):
""" Represents BFD UDP session in VPP """
def __init__(self, test, interface, peer_addr, local_addr=None, af=AF_INET,
desired_min_tx=300000, required_min_rx=300000, detect_mult=3,
sha1_key=None, bfd_key_id=None, is_tunnel=False):
self._test = test
self._interface = interface
self._af = af
if local_addr:
self._local_addr = local_addr
else:
self._local_addr = None
self._peer_addr = peer_addr
self._desired_min_tx = desired_min_tx
self._required_min_rx = required_min_rx
self._detect_mult = detect_mult
self._sha1_key = sha1_key
if bfd_key_id is not None:
self._bfd_key_id = bfd_key_id
else:
self._bfd_key_id = randint(0, 255)
self._is_tunnel = is_tunnel
@property
def test(self):
""" Test which created this session """
return self._test
@property
def interface(self):
""" Interface on which this session lives """
return self._interface
@property
def af(self):
""" Address family - AF_INET or AF_INET6 """
return self._af
@property
def local_addr(self):
""" BFD session local address (VPP address) """
if self._local_addr is None:
if self.af == AF_INET:
return self._interface.local_ip4
elif self.af == AF_INET6:
return self._interface.local_ip6
else:
raise Exception("Unexpected af '%s'" % self.af)
return self._local_addr
@property
def peer_addr(self):
""" BFD session peer address """
return self._peer_addr
def get_bfd_udp_session_dump_entry(self):
""" get the namedtuple entry from bfd udp session dump """
result = self.test.vapi.bfd_udp_session_dump()
for s in result:
self.test.logger.debug("session entry: %s" % str(s))
if s.sw_if_index == self.interface.sw_if_index:
if self.af == AF_INET \
and self.interface.local_ip4 == str(s.local_addr) \
and self.interface.remote_ip4 == str(s.peer_addr):
return s
if self.af == AF_INET6 \
and self.interface.local_ip6 == str(s.local_addr) \
and self.interface.remote_ip6 == str(s.peer_addr):
return s
return None
@property
def state(self):
""" BFD session state """
session = self.get_bfd_udp_session_dump_entry()
if session is None:
raise Exception("Could not find BFD session in VPP response")
return session.state
@property
def desired_min_tx(self):
""" desired minimum tx interval """
return self._desired_min_tx
@property
def required_min_rx(self):
""" required minimum rx interval """
return self._required_min_rx
@property
def detect_mult(self):
""" detect multiplier """
return self._detect_mult
@property
def sha1_key(self):
""" sha1 key """
return self._sha1_key
@property
def bfd_key_id(self):
""" bfd key id in use """
return self._bfd_key_id
@property
def is_tunnel(self):
return self._is_tunnel
def activate_auth(self, key, bfd_key_id=None, delayed=False):
""" activate authentication for this session """
self._bfd_key_id = bfd_key_id if bfd_key_id else randint(0, 255)
self._sha1_key = key
conf_key_id = self._sha1_key.conf_key_id
is_delayed = 1 if delayed else 0
self.test.vapi.bfd_udp_auth_activate(
sw_if_index=self._interface.sw_if_index,
local_addr=self.local_addr,
peer_addr=self.peer_addr,
bfd_key_id=self._bfd_key_id,
conf_key_id=conf_key_id,
is_delayed=is_delayed)
def deactivate_auth(self, delayed=False):
""" deactivate authentication """
self._bfd_key_id = None
self._sha1_key = None
is_delayed = 1 if delayed else 0
self.test.vapi.bfd_udp_auth_deactivate(
sw_if_index=self._interface.sw_if_index,
local_addr=self.local_addr,
peer_addr=self.peer_addr,
is_delayed=is_delayed)
def modify_parameters(self,
detect_mult=None,
desired_min_tx=None,
required_min_rx=None):
""" modify session parameters """
if detect_mult:
self._detect_mult = detect_mult
if desired_min_tx:
self._desired_min_tx = desired_min_tx
if required_min_rx:
self._required_min_rx = required_min_rx
self.test.vapi.bfd_udp_mod(sw_if_index=self._interface.sw_if_index,
desired_min_tx=self.desired_min_tx,
required_min_rx=self.required_min_rx,
detect_mult=self.detect_mult,
local_addr=self.local_addr,
peer_addr=self.peer_addr)
def add_vpp_config(self):
bfd_key_id = self._bfd_key_id if self._sha1_key else None
conf_key_id = self._sha1_key.conf_key_id if self._sha1_key else None
is_authenticated = True if self._sha1_key else False
self.test.vapi.bfd_udp_add(sw_if_index=self._interface.sw_if_index,
desired_min_tx=self.desired_min_tx,
required_min_rx=self.required_min_rx,
detect_mult=self.detect_mult,
local_addr=self.local_addr,
peer_addr=self.peer_addr,
bfd_key_id=bfd_key_id,
conf_key_id=conf_key_id,
is_authenticated=is_authenticated)
self._test.registry.register(self, self.test.logger)
def upd_vpp_config(self,
detect_mult=None,
desired_min_tx=None,
required_min_rx=None):
if desired_min_tx:
self._desired_min_tx = desired_min_tx
if required_min_rx:
self._required_min_rx = required_min_rx
if detect_mult:
self._detect_mult = detect_mult
bfd_key_id = self._bfd_key_id if self._sha1_key else None
conf_key_id = self._sha1_key.conf_key_id if self._sha1_key else None
is_authenticated = True if self._sha1_key else False
self.test.vapi.bfd_udp_upd(sw_if_index=self._interface.sw_if_index,
desired_min_tx=self.desired_min_tx,
required_min_rx=self.required_min_rx,
detect_mult=self.detect_mult,
local_addr=self.local_addr,
peer_addr=self.peer_addr,
bfd_key_id=bfd_key_id,
conf_key_id=conf_key_id,
is_authenticated=is_authenticated)
self._test.registry.register(self, self.test.logger)
def query_vpp_config(self):
session = self.get_bfd_udp_session_dump_entry()
return session is not None
def remove_vpp_config(self):
self.test.vapi.bfd_udp_del(self._interface.sw_if_index,
local_addr=self.local_addr,
peer_addr=self.peer_addr)
def object_id(self):
return "bfd-udp-%s-%s-%s-%s" % (self._interface.sw_if_index,
self.local_addr,
self.peer_addr,
self.af)
def admin_up(self):
""" set bfd session admin-up """
self.test.vapi.bfd_udp_session_set_flags(
flags=VppEnum.vl_api_if_status_flags_t.IF_STATUS_API_FLAG_ADMIN_UP,
sw_if_index=self._interface.sw_if_index,
local_addr=self.local_addr,
peer_addr=self.peer_addr)
def admin_down(self):
""" set bfd session admin-down """
self.test.vapi.bfd_udp_session_set_flags(
flags=0, sw_if_index=self._interface.sw_if_index,
local_addr=self.local_addr,
peer_addr=self.peer_addr)
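# Illustrative sketch (not part of the original module): creating an
# authenticated IPv4 session on a packet-generator interface from within a
# VppTestCase-derived test. "self.pg0" and "key" (a configured VppBFDAuthKey,
# as sketched above) are assumptions, not part of this module.
#
#     session = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4,
#                                sha1_key=key)
#     session.add_vpp_config()
#     session.admin_up()
#     assert session.query_vpp_config()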
|
|
# setup.py - distutils packaging
#
# Copyright (C) 2003-2010 Federico Di Gregorio <fog@debian.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
"""Python-PostgreSQL Database Adapter
psycopg2 is a PostgreSQL database adapter for the Python programming
language. psycopg2 was written with the aim of being very small and fast,
and stable as a rock.
psycopg2 is different from the other database adapters because it was
designed for heavily multi-threaded applications that create and destroy
lots of cursors and make a conspicuous number of concurrent INSERTs or
UPDATEs. psycopg2 also provides full asynchronous operations and support
for coroutine libraries.
"""
# note: if you are changing the list of supported Python version please fix
# the docs in install.rst and the /features/ page on the website.
classifiers = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)
License :: OSI Approved :: Zope Public License
Programming Language :: Python
Programming Language :: Python :: 2.5
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.1
Programming Language :: Python :: 3.2
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Programming Language :: C
Programming Language :: SQL
Topic :: Database
Topic :: Database :: Front-Ends
Topic :: Software Development
Topic :: Software Development :: Libraries :: Python Modules
Operating System :: Microsoft :: Windows
Operating System :: Unix
"""
# Note: The setup.py must be compatible with both Python 2 and 3
import os
import sys
import re
import subprocess
from distutils.core import setup, Extension
from distutils.command.build_ext import build_ext
from distutils.sysconfig import get_python_inc
from distutils.ccompiler import get_default_compiler
from distutils.util import get_platform
try:
from distutils.command.build_py import build_py_2to3
except ImportError:
from distutils.command.build_py import build_py
else:
class build_py(build_py_2to3):
# workaround subclass for ticket #153
pass
# Configure distutils to run our custom 2to3 fixers as well
from lib2to3.refactor import get_fixers_from_package
build_py.fixer_names = get_fixers_from_package('lib2to3.fixes') \
+ [ 'fix_b' ]
sys.path.insert(0, 'scripts')
try:
import configparser
except ImportError:
import ConfigParser as configparser
# Take a look at http://www.python.org/dev/peps/pep-0386/
# for a consistent versioning pattern.
PSYCOPG_VERSION = '2.6'
version_flags = ['dt', 'dec']
PLATFORM_IS_WINDOWS = sys.platform.lower().startswith('win')
class PostgresConfig:
def __init__(self, build_ext):
self.build_ext = build_ext
self.pg_config_exe = self.build_ext.pg_config
if not self.pg_config_exe:
self.pg_config_exe = self.autodetect_pg_config_path()
if self.pg_config_exe is None:
sys.stderr.write("""\
Error: pg_config executable not found.
Please add the directory containing pg_config to the PATH
or specify the full executable path with the option:
python setup.py build_ext --pg-config /path/to/pg_config build ...
or with the pg_config option in 'setup.cfg'.
""")
sys.exit(1)
def query(self, attr_name):
"""Spawn the pg_config executable, querying for the given config
name, and return the printed value, sanitized. """
try:
pg_config_process = subprocess.Popen(
[self.pg_config_exe, "--" + attr_name],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
raise Warning("Unable to find 'pg_config' file in '%s'" %
self.pg_config_exe)
pg_config_process.stdin.close()
result = pg_config_process.stdout.readline().strip()
if not result:
raise Warning(pg_config_process.stderr.readline())
if not isinstance(result, str):
result = result.decode('ascii')
return result
def find_on_path(self, exename, path_directories=None):
if not path_directories:
path_directories = os.environ['PATH'].split(os.pathsep)
for dir_name in path_directories:
fullpath = os.path.join(dir_name, exename)
if os.path.isfile(fullpath):
return fullpath
return None
def autodetect_pg_config_path(self):
"""Find and return the path to the pg_config executable."""
if PLATFORM_IS_WINDOWS:
return self.autodetect_pg_config_path_windows()
else:
return self.find_on_path('pg_config')
def autodetect_pg_config_path_windows(self):
"""Attempt several different ways of finding the pg_config
executable on Windows, and return its full path, if found."""
# This code only runs if they have not specified a pg_config option
# in the config file or via the commandline.
# First, check for pg_config.exe on the PATH, and use that if found.
pg_config_exe = self.find_on_path('pg_config.exe')
if pg_config_exe:
return pg_config_exe
# Now, try looking in the Windows Registry to find a PostgreSQL
# installation, and infer the path from that.
pg_config_exe = self._get_pg_config_from_registry()
if pg_config_exe:
return pg_config_exe
return None
def _get_pg_config_from_registry(self):
try:
import winreg
except ImportError:
import _winreg as winreg
reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
try:
pg_inst_list_key = winreg.OpenKey(reg,
'SOFTWARE\\PostgreSQL\\Installations')
except EnvironmentError:
# No PostgreSQL installation, as best as we can tell.
return None
try:
# Determine the name of the first subkey, if any:
try:
first_sub_key_name = winreg.EnumKey(pg_inst_list_key, 0)
except EnvironmentError:
return None
pg_first_inst_key = winreg.OpenKey(reg,
'SOFTWARE\\PostgreSQL\\Installations\\'
+ first_sub_key_name)
try:
pg_inst_base_dir = winreg.QueryValueEx(
pg_first_inst_key, 'Base Directory')[0]
finally:
winreg.CloseKey(pg_first_inst_key)
finally:
winreg.CloseKey(pg_inst_list_key)
pg_config_path = os.path.join(
pg_inst_base_dir, 'bin', 'pg_config.exe')
if not os.path.exists(pg_config_path):
return None
# Support unicode paths, if this version of Python provides the
# necessary infrastructure:
if sys.version_info[0] < 3 \
and hasattr(sys, 'getfilesystemencoding'):
pg_config_path = pg_config_path.encode(
sys.getfilesystemencoding())
return pg_config_path
class psycopg_build_ext(build_ext):
"""Conditionally complement the setup.cfg options file.
This class configures the include_dirs, library_dirs, libraries
options as required by the system. Most of the configuration happens
in finalize_options() method.
If you want to set up the build step for a peculiar platform, add a
method finalize_PLAT(), where PLAT matches your sys.platform.
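A hypothetical sketch of such a hook (the platform name and library are
illustrative only, not something this module currently handles):

    def finalize_freebsd10(self):
        # called automatically when sys.platform == 'freebsd10'
        self.libraries.append('ssl')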
"""
user_options = build_ext.user_options[:]
user_options.extend([
('use-pydatetime', None,
"Use Python datatime objects for date and time representation."),
('pg-config=', None,
"The name of the pg_config binary and/or full path to find it"),
('have-ssl', None,
"Compile with OpenSSL built PostgreSQL libraries (Windows only)."),
('static-libpq', None,
"Statically link the PostgreSQL client library"),
])
boolean_options = build_ext.boolean_options[:]
boolean_options.extend(('use-pydatetime', 'have-ssl', 'static-libpq'))
def __init__(self, *args, **kwargs):
build_ext.__init__(self, *args, **kwargs)
def initialize_options(self):
build_ext.initialize_options(self)
self.use_pg_dll = 1
self.pgdir = None
self.mx_include_dir = None
self.use_pydatetime = 1
self.have_ssl = have_ssl
self.static_libpq = static_libpq
self.pg_config = None
def compiler_is_msvc(self):
return self.get_compiler_name().lower().startswith('msvc')
def compiler_is_mingw(self):
return self.get_compiler_name().lower().startswith('mingw')
def get_compiler_name(self):
"""Return the name of the C compiler used to compile extensions.
If a compiler was not explicitly set (on the command line, for
example), fall back on the default compiler.
"""
if self.compiler:
# distutils doesn't keep the type of self.compiler uniform; we
# compensate:
if isinstance(self.compiler, str):
name = self.compiler
else:
name = self.compiler.compiler_type
else:
name = get_default_compiler()
return name
def get_export_symbols(self, extension):
# Fix MSVC seeing two of the same export symbols.
if self.compiler_is_msvc():
return []
else:
return build_ext.get_export_symbols(self, extension)
def build_extension(self, extension):
build_ext.build_extension(self, extension)
sysVer = sys.version_info[:2]
# For Python versions that use MSVC compiler 2008, re-insert the
# manifest into the resulting .pyd file.
if self.compiler_is_msvc() and sysVer not in ((2, 4), (2, 5)):
platform = get_platform()
# Default to the x86 manifest
manifest = '_psycopg.vc9.x86.manifest'
if platform == 'win-amd64':
manifest = '_psycopg.vc9.amd64.manifest'
try:
ext_path = self.get_ext_fullpath(extension.name)
except AttributeError:
ext_path = os.path.join(self.build_lib,
'psycopg2', '_psycopg.pyd')
self.compiler.spawn(
['mt.exe', '-nologo', '-manifest',
os.path.join('psycopg', manifest),
'-outputresource:%s;2' % ext_path])
def finalize_win32(self):
"""Finalize build system configuration on win32 platform."""
sysVer = sys.version_info[:2]
# Add compiler-specific arguments:
extra_compiler_args = []
if self.compiler_is_mingw():
# Default MinGW compilation of Python extensions on Windows uses
# only -O:
extra_compiler_args.append('-O3')
# GCC-compiled Python on non-Windows platforms is built with strict
# aliasing disabled, but that must be done explicitly on Windows to
# avoid large numbers of warnings for perfectly idiomatic Python C
# API code.
extra_compiler_args.append('-fno-strict-aliasing')
# Force correct C runtime library linkage:
if sysVer <= (2, 3):
# Yes: 'msvcr60', rather than 'msvcrt', is the correct value
# on the line below:
self.libraries.append('msvcr60')
elif sysVer in ((2, 4), (2, 5)):
self.libraries.append('msvcr71')
# Beyond Python 2.5, we take our chances on the default C runtime
# library, because we don't know what compiler those future
# versions of Python will use.
for extension in ext: # ext is a global list of Extension objects
extension.extra_compile_args.extend(extra_compiler_args)
# End of add-compiler-specific arguments section.
self.libraries.append("ws2_32")
self.libraries.append("advapi32")
if self.compiler_is_msvc():
# MSVC requires an explicit "libpq"
self.libraries.remove("pq")
self.libraries.append("secur32")
self.libraries.append("libpq")
self.libraries.append("shfolder")
for path in self.library_dirs:
if os.path.isfile(os.path.join(path, "ms", "libpq.lib")):
self.library_dirs.append(os.path.join(path, "ms"))
break
if self.have_ssl:
self.libraries.append("libeay32")
self.libraries.append("ssleay32")
self.libraries.append("crypt32")
self.libraries.append("user32")
self.libraries.append("gdi32")
def finalize_darwin(self):
"""Finalize build system configuration on darwin platform."""
self.libraries.append('ssl')
self.libraries.append('crypto')
def finalize_linux(self):
"""Finalize build system configuration on GNU/Linux platform."""
# tell piro that GCC is fine and dandy, but not so MS compilers
for extension in self.extensions:
extension.extra_compile_args.append(
'-Wdeclaration-after-statement')
finalize_linux2 = finalize_linux
finalize_linux3 = finalize_linux
def finalize_options(self):
"""Complete the build system configuration."""
build_ext.finalize_options(self)
pg_config_helper = PostgresConfig(self)
self.include_dirs.append(".")
if self.static_libpq:
if not getattr(self, 'link_objects', None):
self.link_objects = []
self.link_objects.append(
os.path.join(pg_config_helper.query("libdir"), "libpq.a"))
else:
self.libraries.append("pq")
try:
self.library_dirs.append(pg_config_helper.query("libdir"))
self.include_dirs.append(pg_config_helper.query("includedir"))
self.include_dirs.append(pg_config_helper.query("includedir-server"))
try:
# Here we take a conservative approach: we suppose that
# *at least* PostgreSQL 7.4 is available (this is the only
# 7.x series supported by psycopg 2)
pgversion = pg_config_helper.query("version").split()[1]
except:
pgversion = "7.4.0"
verre = re.compile(
r"(\d+)\.(\d+)(?:(?:\.(\d+))|(devel|(alpha|beta|rc)\d+))")
m = verre.match(pgversion)
if m:
pgmajor, pgminor, pgpatch = m.group(1, 2, 3)
if pgpatch is None or not pgpatch.isdigit():
pgpatch = 0
pgmajor = int(pgmajor)
pgminor = int(pgminor)
pgpatch = int(pgpatch)
else:
sys.stderr.write(
"Error: could not determine PostgreSQL version from '%s'"
% pgversion)
sys.exit(1)
define_macros.append(("PG_VERSION_HEX", "0x%02X%02X%02X" %
(pgmajor, pgminor, pgpatch)))
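# Worked example: PostgreSQL 9.3.5 gives pgmajor=9, pgminor=3, pgpatch=5,
# i.e. PG_VERSION_HEX = 0x090305.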
# enable lo64 if libpq >= 9.3 and Python 64 bits
if (pgmajor, pgminor) >= (9, 3) and is_py_64():
define_macros.append(("HAVE_LO64", "1"))
# Inject the flag in the version string already packed up
# because we didn't know the version before.
# With distutils everything is complicated.
for i, t in enumerate(define_macros):
if t[0] == 'PSYCOPG_VERSION':
n = t[1].find(')')
if n > 0:
define_macros[i] = (
t[0], t[1][:n] + ' lo64' + t[1][n:])
except Warning:
w = sys.exc_info()[1] # work around py 2/3 different syntax
sys.stderr.write("Error: %s\n" % w)
sys.exit(1)
if hasattr(self, "finalize_" + sys.platform):
getattr(self, "finalize_" + sys.platform)()
def is_py_64():
# sys.maxint not available since Py 3.1;
# sys.maxsize not available before Py 2.6;
# this is portable at least between Py 2.4 and 3.4.
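# struct.calcsize("P") is the size in bytes of a C pointer: 4 on 32-bit
# builds and 8 on 64-bit builds, on both Python 2 and 3.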
import struct
return struct.calcsize("P") > 4
# let's start with macro definitions (the ones not already in setup.cfg)
define_macros = []
include_dirs = []
# gather information to build the extension module
ext = []
data_files = []
# sources
sources = [
'psycopgmodule.c',
'green.c', 'pqpath.c', 'utils.c', 'bytes_format.c',
'connection_int.c', 'connection_type.c',
'cursor_int.c', 'cursor_type.c',
'diagnostics_type.c', 'error_type.c',
'lobject_int.c', 'lobject_type.c',
'notify_type.c', 'xid_type.c',
'adapter_asis.c', 'adapter_binary.c', 'adapter_datetime.c',
'adapter_list.c', 'adapter_pboolean.c', 'adapter_pdecimal.c',
'adapter_pint.c', 'adapter_pfloat.c', 'adapter_qstring.c',
'microprotocols.c', 'microprotocols_proto.c',
'typecast.c',
]
depends = [
# headers
'config.h', 'pgtypes.h', 'psycopg.h', 'python.h', 'connection.h',
'cursor.h', 'diagnostics.h', 'error.h', 'green.h', 'lobject.h',
'notify.h', 'pqpath.h', 'xid.h',
'adapter_asis.h', 'adapter_binary.h', 'adapter_datetime.h',
'adapter_list.h', 'adapter_pboolean.h', 'adapter_pdecimal.h',
'adapter_pint.h', 'adapter_pfloat.h', 'adapter_qstring.h',
'microprotocols.h', 'microprotocols_proto.h',
'typecast.h', 'typecast_binary.h',
# included sources
'typecast_array.c', 'typecast_basic.c', 'typecast_binary.c',
'typecast_builtins.c', 'typecast_datetime.c',
]
parser = configparser.ConfigParser()
parser.read('setup.cfg')
# Choose a datetime module
have_pydatetime = True
have_mxdatetime = False
use_pydatetime = int(parser.get('build_ext', 'use_pydatetime'))
# check for mx package
if parser.has_option('build_ext', 'mx_include_dir'):
mxincludedir = parser.get('build_ext', 'mx_include_dir')
else:
mxincludedir = os.path.join(get_python_inc(plat_specific=1), "mx")
if os.path.exists(mxincludedir):
# Build the support for mx: we will check at runtime if it can be imported
include_dirs.append(mxincludedir)
define_macros.append(('HAVE_MXDATETIME', '1'))
sources.append('adapter_mxdatetime.c')
depends.extend(['adapter_mxdatetime.h', 'typecast_mxdatetime.c'])
have_mxdatetime = True
version_flags.append('mx')
# now decide which package will be the default for date/time typecasts
if have_pydatetime and (use_pydatetime or not have_mxdatetime):
define_macros.append(('PSYCOPG_DEFAULT_PYDATETIME', '1'))
elif have_mxdatetime:
define_macros.append(('PSYCOPG_DEFAULT_MXDATETIME', '1'))
else:
error_message = """\
psycopg requires a datetime module:
mx.DateTime module not found
python datetime module not found
Note that psycopg needs the module headers and not just the module
itself. If you installed Python or mx.DateTime from a binary package
you probably need to install its companion -dev or -devel package."""
for line in error_message.split("\n"):
sys.stderr.write("error: " + line)
sys.exit(1)
# generate a nice version string to avoid confusion when users report bugs
version_flags.append('pq3') # no more a choice
version_flags.append('ext') # no more a choice
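# Worked example: with the default flags this yields "2.6 (dt dec pq3 ext)",
# plus "mx" when the mx.DateTime headers were found above.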
if version_flags:
PSYCOPG_VERSION_EX = PSYCOPG_VERSION + " (%s)" % ' '.join(version_flags)
else:
PSYCOPG_VERSION_EX = PSYCOPG_VERSION
if not PLATFORM_IS_WINDOWS:
define_macros.append(('PSYCOPG_VERSION', '"' + PSYCOPG_VERSION_EX + '"'))
else:
define_macros.append(('PSYCOPG_VERSION', '\\"' + PSYCOPG_VERSION_EX + '\\"'))
if parser.has_option('build_ext', 'have_ssl'):
have_ssl = int(parser.get('build_ext', 'have_ssl'))
else:
have_ssl = 0
if parser.has_option('build_ext', 'static_libpq'):
static_libpq = int(parser.get('build_ext', 'static_libpq'))
else:
static_libpq = 0
# And now... explicitly add the defines from the .cfg files.
# Looks like setuptools or some other cog doesn't add them to the command line
# when called e.g. with "pip -e git+url". This results in duplicate
# declarations on the command line, which I hope is not a problem.
for define in parser.get('build_ext', 'define').split(','):
if define:
define_macros.append((define, '1'))
# build the extension
sources = [ os.path.join('psycopg', x) for x in sources]
depends = [ os.path.join('psycopg', x) for x in depends]
ext.append(Extension("psycopg2._psycopg", sources,
define_macros=define_macros,
include_dirs=include_dirs,
depends=depends,
undef_macros=[]))
# Compute the direct download url.
# Note that the current package installation programs are stupidly intelligent
# and will try to install a beta if they find a link in the homepage instead of
# using these pretty metadata. But that's their problem, not ours.
download_url = (
"http://initd.org/psycopg/tarballs/PSYCOPG-%s/psycopg2-%s.tar.gz"
% ('-'.join(PSYCOPG_VERSION.split('.')[:2]), PSYCOPG_VERSION))
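# Worked example: for PSYCOPG_VERSION = '2.6' this resolves to
# http://initd.org/psycopg/tarballs/PSYCOPG-2-6/psycopg2-2.6.tar.gz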
try:
f = open("README.rst")
readme = f.read()
f.close()
except:
print("failed to read readme: ignoring...")
readme = __doc__
setup(name="psycopg2",
version=PSYCOPG_VERSION,
maintainer="Federico Di Gregorio",
maintainer_email="fog@initd.org",
author="Federico Di Gregorio",
author_email="fog@initd.org",
url="http://initd.org/psycopg/",
download_url=download_url,
license="LGPL with exceptions or ZPL",
platforms=["any"],
description=readme.split("\n")[0],
long_description="\n".join(readme.split("\n")[2:]).lstrip(),
classifiers=[x for x in classifiers.split("\n") if x],
data_files=data_files,
package_dir={'psycopg2': 'lib', 'psycopg2.tests': 'tests'},
packages=['psycopg2', 'psycopg2.tests'],
cmdclass={
'build_ext': psycopg_build_ext,
'build_py': build_py, },
ext_modules=ext)
|
|
"""Support for Belkin WeMo lights."""
from __future__ import annotations
import asyncio
from typing import Any, Optional, cast
from pywemo.ouimeaux_device import bridge
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_TRANSITION,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.device_registry import CONNECTION_ZIGBEE
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
import homeassistant.util.color as color_util
from .const import DOMAIN as WEMO_DOMAIN
from .entity import WemoBinaryStateEntity, WemoEntity
from .wemo_device import DeviceCoordinator
SUPPORT_WEMO = (
SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_COLOR | SUPPORT_TRANSITION
)
# The WEMO_ constants below come from pywemo itself
WEMO_OFF = 0
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up WeMo lights."""
async def _discovered_wemo(coordinator: DeviceCoordinator) -> None:
"""Handle a discovered Wemo device."""
if isinstance(coordinator.wemo, bridge.Bridge):
async_setup_bridge(hass, config_entry, async_add_entities, coordinator)
else:
async_add_entities([WemoDimmer(coordinator)])
async_dispatcher_connect(hass, f"{WEMO_DOMAIN}.light", _discovered_wemo)
await asyncio.gather(
*(
_discovered_wemo(coordinator)
for coordinator in hass.data[WEMO_DOMAIN]["pending"].pop("light")
)
)
@callback
def async_setup_bridge(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
coordinator: DeviceCoordinator,
) -> None:
"""Set up a WeMo link."""
known_light_ids = set()
@callback
def async_update_lights() -> None:
"""Check to see if the bridge has any new lights."""
new_lights = []
for light_id, light in coordinator.wemo.Lights.items():
if light_id not in known_light_ids:
known_light_ids.add(light_id)
new_lights.append(WemoLight(coordinator, light))
if new_lights:
async_add_entities(new_lights)
async_update_lights()
config_entry.async_on_unload(coordinator.async_add_listener(async_update_lights))
class WemoLight(WemoEntity, LightEntity):
"""Representation of a WeMo light."""
def __init__(self, coordinator: DeviceCoordinator, light: bridge.Light) -> None:
"""Initialize the WeMo light."""
super().__init__(coordinator)
self.light = light
self._unique_id = self.light.uniqueID
self._model_name = type(self.light).__name__
@property
def name(self) -> str:
"""Return the name of the device if any."""
return cast(str, self.light.name)
@property
def available(self) -> bool:
"""Return true if the device is available."""
return super().available and self.light.state.get("available")
@property
def unique_id(self) -> str:
"""Return the ID of this light."""
return cast(str, self.light.uniqueID)
@property
def device_info(self) -> DeviceInfo:
"""Return the device info."""
return DeviceInfo(
connections={(CONNECTION_ZIGBEE, self._unique_id)},
identifiers={(WEMO_DOMAIN, self._unique_id)},
manufacturer="Belkin",
model=self._model_name,
name=self.name,
)
@property
def brightness(self) -> int:
"""Return the brightness of this light between 0..255."""
return cast(int, self.light.state.get("level", 255))
@property
def hs_color(self) -> tuple[float, float] | None:
"""Return the hs color values of this light."""
if xy_color := self.light.state.get("color_xy"):
return color_util.color_xy_to_hs(*xy_color)
return None
@property
def color_temp(self) -> int | None:
"""Return the color temperature of this light in mireds."""
return cast(Optional[int], self.light.state.get("temperature_mireds"))
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return cast(int, self.light.state.get("onoff")) != WEMO_OFF
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_WEMO
def turn_on(self, **kwargs: Any) -> None:
"""Turn the light on."""
xy_color = None
brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness or 255)
color_temp = kwargs.get(ATTR_COLOR_TEMP)
hs_color = kwargs.get(ATTR_HS_COLOR)
transition_time = int(kwargs.get(ATTR_TRANSITION, 0))
if hs_color is not None:
xy_color = color_util.color_hs_to_xy(*hs_color)
turn_on_kwargs = {
"level": brightness,
"transition": transition_time,
"force_update": False,
}
with self._wemo_exception_handler("turn on"):
if xy_color is not None:
self.light.set_color(xy_color, transition=transition_time)
if color_temp is not None:
self.light.set_temperature(
mireds=color_temp, transition=transition_time
)
self.light.turn_on(**turn_on_kwargs)
self.schedule_update_ha_state()
def turn_off(self, **kwargs: Any) -> None:
"""Turn the light off."""
transition_time = int(kwargs.get(ATTR_TRANSITION, 0))
with self._wemo_exception_handler("turn off"):
self.light.turn_off(transition=transition_time)
self.schedule_update_ha_state()
class WemoDimmer(WemoBinaryStateEntity, LightEntity):
"""Representation of a WeMo dimmer."""
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
@property
def brightness(self) -> int:
"""Return the brightness of this light between 1 and 100."""
wemo_brightness: int = self.wemo.get_brightness()
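# e.g. a WeMo level of 50 maps to int((50 * 255) / 100) == 127 on
# Home Assistant's 0-255 scale.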
return int((wemo_brightness * 255) / 100)
def turn_on(self, **kwargs: Any) -> None:
"""Turn the dimmer on."""
# Wemo dimmer switches use a range of [0, 100] to control
# brightness. Level 255 might mean to set it to previous value
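# e.g. a requested Home Assistant brightness of 128 becomes
# int((128 / 255) * 100) == 50 on the WeMo scale.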
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
brightness = int((brightness / 255) * 100)
with self._wemo_exception_handler("set brightness"):
self.wemo.set_brightness(brightness)
else:
with self._wemo_exception_handler("turn on"):
self.wemo.on()
self.schedule_update_ha_state()
def turn_off(self, **kwargs: Any) -> None:
"""Turn the dimmer off."""
with self._wemo_exception_handler("turn off"):
self.wemo.off()
self.schedule_update_ha_state()
|
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from openerp.addons.mail.tests.common import TestMail
from openerp.tools import mute_logger
class test_message_compose(TestMail):
def setUp(self):
super(test_message_compose, self).setUp()
# create a 'Bird' group that will be used throughout the various tests
self.group_bird_id = self.mail_group.create(self.cr, self.uid,
{'name': 'Bird', 'description': 'I am angry !'})
def test_00_message_compose_wizard(self):
""" Tests designed for the mail.compose.message wizard updated by email_template. """
cr, uid = self.cr, self.uid
mail_compose = self.registry('mail.compose.message')
self.res_users.write(cr, uid, [uid], {'signature': 'Admin', 'email': 'a@a.a'})
user_admin = self.res_users.browse(cr, uid, uid)
p_a_id = user_admin.partner_id.id
group_pigs = self.mail_group.browse(cr, uid, self.group_pigs_id)
group_bird = self.mail_group.browse(cr, uid, self.group_bird_id)
# Mail data
_subject1 = 'Pigs'
_subject2 = 'Bird'
_body_html1 = 'Fans of Pigs, unite !'
_body_html2 = 'I am angry !'
_attachments = [
{'name': 'First', 'datas_fname': 'first.txt', 'datas': base64.b64encode('My first attachment'), 'res_model': 'res.partner', 'res_id': self.partner_admin_id},
{'name': 'Second', 'datas_fname': 'second.txt', 'datas': base64.b64encode('My second attachment'), 'res_model': 'res.partner', 'res_id': self.partner_admin_id},
]
_attachments_test = [('first.txt', 'My first attachment'), ('second.txt', 'My second attachment')]
# Create template on mail.group, with attachments
group_model_id = self.registry('ir.model').search(cr, uid, [('model', '=', 'mail.group')])[0]
email_template = self.registry('email.template')
email_template_id = email_template.create(cr, uid, {
'model_id': group_model_id,
'name': 'Pigs Template',
'subject': '${object.name}',
'body_html': '${object.description}',
'user_signature': False,
'attachment_ids': [(0, 0, _attachments[0]), (0, 0, _attachments[1])],
'email_to': 'b@b.b, c@c.c',
'email_cc': 'd@d.d'
})
# ----------------------------------------
# CASE1: comment and save as template
# ----------------------------------------
# 1. Comment on pigs
compose_id = mail_compose.create(cr, uid,
{'subject': 'Forget me subject', 'body': '<p>Dummy body</p>'},
{'default_composition_mode': 'comment',
'default_model': 'mail.group',
'default_res_id': self.group_pigs_id,
'active_ids': [self.group_pigs_id, self.group_bird_id]})
compose = mail_compose.browse(cr, uid, compose_id)
# 2. Save current composition form as a template
mail_compose.save_as_template(cr, uid, [compose_id], context={'default_model': 'mail.group'})
# Test: email_template subject, body_html, model
last_template_id = email_template.search(cr, uid, [('model', '=', 'mail.group'), ('subject', '=', 'Forget me subject')], limit=1)[0]
self.assertTrue(last_template_id, 'email_template not found for model mail.group, subject Forget me subject')
last_template = email_template.browse(cr, uid, last_template_id)
self.assertEqual(last_template.body_html, '<p>Dummy body</p>', 'email_template incorrect body_html')
# ----------------------------------------
# CASE2: comment with template, save as template
# ----------------------------------------
# 1. Comment on pigs
context = {
'default_composition_mode': 'comment',
'default_model': 'mail.group',
'default_res_id': self.group_pigs_id,
'default_use_template': False,
'default_template_id': email_template_id,
'active_ids': [self.group_pigs_id, self.group_bird_id]
}
compose_id = mail_compose.create(cr, uid, {'subject': 'Forget me subject', 'body': 'Dummy body'}, context)
compose = mail_compose.browse(cr, uid, compose_id, context)
onchange_res = compose.onchange_template_id(email_template_id, 'comment', 'mail.group', self.group_pigs_id)['value']
onchange_res['partner_ids'] = [(4, partner_id) for partner_id in onchange_res.pop('partner_ids', [])]
onchange_res['attachment_ids'] = [(4, attachment_id) for attachment_id in onchange_res.pop('attachment_ids', [])]
compose.write(onchange_res)
compose.refresh()
message_pids = [partner.id for partner in compose.partner_ids]
partner_ids = self.res_partner.search(cr, uid, [('email', 'in', ['b@b.b', 'c@c.c', 'd@d.d'])])
# Test: mail.compose.message: subject, body, partner_ids
self.assertEqual(compose.subject, _subject1, 'mail.compose.message subject incorrect')
self.assertIn(_body_html1, compose.body, 'mail.compose.message body incorrect')
self.assertEqual(set(message_pids), set(partner_ids), 'mail.compose.message partner_ids incorrect')
# Test: mail.compose.message: attachments (owner has not been modified)
for attach in compose.attachment_ids:
self.assertEqual(attach.res_model, 'res.partner', 'mail.compose.message attachment res_model through template was overridden')
self.assertEqual(attach.res_id, self.partner_admin_id, 'mail.compose.message attachment res_id incorrect')
self.assertIn((attach.datas_fname, base64.b64decode(attach.datas)), _attachments_test,
'mail.message attachment name / data incorrect')
# Test: mail.message: attachments
mail_compose.send_mail(cr, uid, [compose_id])
group_pigs.refresh()
message_pigs = group_pigs.message_ids[0]
for attach in message_pigs.attachment_ids:
self.assertEqual(attach.res_model, 'mail.group', 'mail.compose.message attachment res_model through template was overridden')
self.assertEqual(attach.res_id, self.group_pigs_id, 'mail.compose.message attachment res_id incorrect')
self.assertIn((attach.datas_fname, base64.b64decode(attach.datas)), _attachments_test,
'mail.message attachment name / data incorrect')
# ----------------------------------------
# CASE3: mass_mail with template
# ----------------------------------------
# 1. Mass_mail on pigs and bird, with a default_partner_ids set to check it is correctly added
context = {
'default_composition_mode': 'mass_mail',
'default_notify': True,
'default_model': 'mail.group',
'default_res_id': self.group_pigs_id,
'default_template_id': email_template_id,
'default_partner_ids': [p_a_id],
'active_ids': [self.group_pigs_id, self.group_bird_id]
}
compose_id = mail_compose.create(cr, uid, {'subject': 'Forget me subject', 'body': 'Dummy body'}, context)
compose = mail_compose.browse(cr, uid, compose_id, context)
onchange_res = compose.onchange_template_id(email_template_id, 'mass_mail', 'mail.group', self.group_pigs_id)['value']
onchange_res['partner_ids'] = [(4, partner_id) for partner_id in onchange_res.pop('partner_ids', [])]
onchange_res['attachment_ids'] = [(4, attachment_id) for attachment_id in onchange_res.pop('attachment_ids', [])]
compose.write(onchange_res)
compose.refresh()
message_pids = [partner.id for partner in compose.partner_ids]
partner_ids = [p_a_id]
self.assertEqual(compose.subject, '${object.name}', 'mail.compose.message subject incorrect')
self.assertEqual(compose.body, '<p>${object.description}</p>', 'mail.compose.message body incorrect') # todo: check signature
self.assertEqual(set(message_pids), set(partner_ids), 'mail.compose.message partner_ids incorrect')
# 2. Post the comment, get created message
mail_compose.send_mail(cr, uid, [compose_id], {'default_res_id': -1, 'active_ids': [self.group_pigs_id, self.group_bird_id]})
group_pigs.refresh()
group_bird.refresh()
message_pigs = group_pigs.message_ids[0]
message_bird = group_bird.message_ids[0]
# Test: subject, body
self.assertEqual(message_pigs.subject, _subject1, 'mail.message subject on Pigs incorrect')
self.assertEqual(message_bird.subject, _subject2, 'mail.message subject on Bird incorrect')
self.assertIn(_body_html1, message_pigs.body, 'mail.message body on Pigs incorrect')
self.assertIn(_body_html2, message_bird.body, 'mail.message body on Bird incorrect')
# Test: partner_ids: p_a_id (default) + 3 newly created partners
# message_pigs_pids = [partner.id for partner in message_pigs.notified_partner_ids]
# message_bird_pids = [partner.id for partner in message_bird.notified_partner_ids]
# partner_ids = self.res_partner.search(cr, uid, [('email', 'in', ['b@b.b', 'c@c.c', 'd@d.d'])])
# partner_ids.append(p_a_id)
# self.assertEqual(set(message_pigs_pids), set(partner_ids), 'mail.message on pigs incorrect number of notified_partner_ids')
# self.assertEqual(set(message_bird_pids), set(partner_ids), 'mail.message on bird notified_partner_ids incorrect')
# ----------------------------------------
# CASE4: test newly introduced partner_to field
# ----------------------------------------
# get already-created partners back
p_b_id = self.res_partner.search(cr, uid, [('email', '=', 'b@b.b')])[0]
p_c_id = self.res_partner.search(cr, uid, [('email', '=', 'c@c.c')])[0]
p_d_id = self.res_partner.search(cr, uid, [('email', '=', 'd@d.d')])[0]
# modify template: use partner_to, use template and email address in email_to to test all features together
user_model_id = self.registry('ir.model').search(cr, uid, [('model', '=', 'res.users')])[0]
email_template.write(cr, uid, [email_template_id], {
'model_id': user_model_id,
'body_html': '${object.login}',
'email_to': '${object.email}, c@c',
'partner_to': '%i,%i' % (p_b_id, p_c_id),
'email_cc': 'd@d',
})
# partner by email + partner by id (no duplicates)
send_to = [p_a_id, p_b_id, p_c_id, p_d_id]
# Generate message with default email and partner on template
mail_value = mail_compose.generate_email_for_composer(cr, uid, email_template_id, uid)
self.assertEqual(set(mail_value['partner_ids']), set(send_to), 'mail.message partner_ids list created by template is incorrect')
@mute_logger('openerp.models')
def test_10_email_templating(self):
""" Tests designed for the mail.compose.message wizard updated by email_template. """
cr, uid, context = self.cr, self.uid, {}
# create the email.template on mail.group model
group_model_id = self.registry('ir.model').search(cr, uid, [('model', '=', 'mail.group')])[0]
email_template = self.registry('email.template')
email_template_id = email_template.create(cr, uid, {
'model_id': group_model_id,
'name': 'Pigs Template',
'email_from': 'Raoul Grosbedon <raoul@example.com>',
'subject': '${object.name}',
'body_html': '${object.description}',
'user_signature': True,
'email_to': 'b@b.b, c@c.c',
'email_cc': 'd@d.d',
'partner_to': '${user.partner_id.id},%s,%s,-1' % (self.user_raoul.partner_id.id, self.user_bert.partner_id.id)
})
# not force send: email_recipients is not taken into account
msg_id = email_template.send_mail(cr, uid, email_template_id, self.group_pigs_id, context=context)
mail = self.mail_mail.browse(cr, uid, msg_id, context=context)
self.assertEqual(mail.subject, 'Pigs', 'email_template: send_mail: wrong subject')
self.assertEqual(mail.email_to, 'b@b.b, c@c.c', 'email_template: send_mail: wrong email_to')
self.assertEqual(mail.email_cc, 'd@d.d', 'email_template: send_mail: wrong email_cc')
self.assertEqual(
set([partner.id for partner in mail.recipient_ids]),
set((self.partner_admin_id, self.user_raoul.partner_id.id, self.user_bert.partner_id.id)),
'email_template: send_mail: wrong management of partner_to')
# force send: take email_recipients into account
email_template.send_mail(cr, uid, email_template_id, self.group_pigs_id, force_send=True, context=context)
sent_emails = self._build_email_kwargs_list
email_to_lst = [
['b@b.b', 'c@c.c'], ['Administrator <admin@yourcompany.example.com>'],
['Raoul Grosbedon <raoul@raoul.fr>'], ['Bert Tartignole <bert@bert.fr>']]
self.assertEqual(len(sent_emails), 4, 'email_template: send_mail: 3 valid email recipients + email_to -> should send 4 emails')
for email in sent_emails:
self.assertIn(email['email_to'], email_to_lst, 'email_template: send_mail: wrong email_recipients')
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from stixels_evaluation import StixelsEvaluationApplication, Bunch, Recording
import collections
options = Bunch()
#options.ground_truth_path = "/users/visics/rbenenso/data/bertan_datasets/Zurich/bahnhof/annotations/bahnhof-annot.idl"
options.ground_truth_path = "/home/rodrigob/data/bahnhof/annotations/bahnhof-annot.idl"
#options.ground_truth_path = "/users/visics/rbenenso/data/bertan_datasets/Zurich/bahnhof/annotations/bahnhof-annot-local-filtered.idl"
#options.ground_truth_path = "/users/visics/rbenenso/data/bertan_datasets/Zurich/bahnhof/annotations/Annotations.idl"
#options.recordings = dict()
options.recordings = collections.OrderedDict()
base_recordings_path = "/users/visics/rbenenso/code/doppia/src/applications/stixel_world/"
rss2011_recordings_path = "/users/visics/rbenenso/data/2011_rss_paper_results/applications/stixel_world/"
iros2011_recordings_path = "/users/visics/rbenenso/data/2011_iros_paper_results/applications/stixel_world/"
iccv2011_workshop_recordings_path = "/users/visics/rbenenso/data/2011_iccv_workshop_paper_results/applications/stixel_world/"
eccv2012_workshop_recordings_path = "/home/rodrigob/code/doppia/src/applications/stixel_world/"
if False:
options.recordings["fixed initial_results all_bboxes 1.8m 50px"] = \
Recording(
directory = base_recordings_path + "2011_01_05_67521_recordings_first_results",
frontmost_bboxes_only = False)
options.recordings["fixed all_bboxes"] = \
Recording(
directory = base_recordings_path + "2011_01_15_59709_recordings_fixed_height",
frontmost_bboxes_only = False)
options.recordings["fixed residual_image all_bboxes"] = \
Recording(
directory = base_recordings_path + "2011_01_15_61191_recordings_fixed_height_and_residual_image",
frontmost_bboxes_only = False)
options.recordings["fixed no_postfiltering all_bboxes"] = \
Recording(
directory = base_recordings_path + "2011_01_17_66016_recordings_stixel_world_to_use_with_fixed_height_with_no_post_filtering",
frontmost_bboxes_only = False)
options.recordings["fixed no_postfiltering residual_ground with_1to1_ratio all_bboxes"] = \
Recording(
directory = base_recordings_path + "2011_01_17_70193_recordings_stixel_world_to_use_with_fixed_height_with_no_post_filtering_and_residual_ground_WITH_BUG",
frontmost_bboxes_only = False)
options.recordings["fixed no_postfiltering residual_ground with_1to1.5_ratio all_bboxes"] = \
Recording(
directory = base_recordings_path + "2011_01_17_72941_recordings_stixel_world_to_use_with_fixed_height_with_no_post_filtering_and_residual_ground_with_1.5_ratio",
frontmost_bboxes_only = False)
if False:
options.recordings["two_steps partial_depth enforced_height all_bboxes"] = \
Recording(
directory = base_recordings_path + "2011_01_15_75456_recordings_stixel_world_to_use_with_two_steps_uses_partial_depth_map_and_enforced_heights",
frontmost_bboxes_only = False)
options.recordings["two_steps partial_depth without_enforced_height all_bboxes"] = \
Recording(
directory = base_recordings_path + "2011_01_15_75790_recordings_stixel_world_to_use_with_two_steps_uses_partial_depth_map_and_without_enforced_heights",
frontmost_bboxes_only = False)
options.recordings["two_steps full_depth enforced_height all_bboxes"] = \
Recording(
directory = base_recordings_path + "2011_01_15_76098_recordings_stixel_world_to_use_with_two_steps_uses_full_depth_map_and_enforced_heights",
frontmost_bboxes_only = False)
options.recordings["two_steps no_postfiltering residual_ground without_enforced_height all_bboxes"] = \
Recording(
directory = base_recordings_path + "2011_01_17_70426_recordings_stixel_world_to_use_with_two_steps_uses_partial_depth_map_without_enforced_heights_with_residual_ground",
frontmost_bboxes_only = False)
options.recordings["two_steps no_postfiltering residual_ground enforced_pixel_height all_bboxes"] = \
Recording(
directory = base_recordings_path + "2011_01_17_73880_recordings_stixel_world_to_use_with_two_steps_uses_partial_depth_map_with_pixels_enforced_heights_with_residual_ground_with_1.5_ratio",
frontmost_bboxes_only = False)
options.recordings["two_steps no_postfiltering residual_ground enforced_pixel_height_12 ratio_1to1 all_bboxes"] = \
Recording(
directory = base_recordings_path + "2011_01_17_85243_recordings_stixel_world_to_use_with_two_steps_uses_partial_depth_map_with_pixels_12_enforced_heights_with_residual_ground_with_1to1_ratio",
frontmost_bboxes_only = False)
options.recordings["two_steps no_postfiltering residual_ground enforced_pixel_height_20 ratio_1to1 all_bboxes"] = \
Recording(
directory = base_recordings_path + "2011_01_17_85378_recordings_stixel_world_to_use_with_two_steps_uses_partial_depth_map_with_pixels_20_enforced_heights_with_residual_ground_with_1to1_ratio",
frontmost_bboxes_only = False)
# no_postfiltering residual_ground with_1to1_ratio all_bboxes
options.recordings["fixed"] = \
Recording(
directory = base_recordings_path + "2011_01_17_70193_recordings_stixel_world_to_use_with_fixed_height_with_no_post_filtering_and_residual_ground_WITH_BUG",
frontmost_bboxes_only = False)
# estimated partial depth
# two_steps no_postfiltering residual_ground enforced_pixel_height_20 ratio_1to1 all_bboxes
options.recordings["estimated partial depth height60"] = \
Recording(
directory = base_recordings_path + "2011_01_17_85378_recordings_stixel_world_to_use_with_two_steps_uses_partial_depth_map_with_pixels_20_enforced_heights_with_residual_ground_with_1to1_ratio",
frontmost_bboxes_only = False)
# estimated full depth
# two_steps full_depth enforced_height all_bboxes
options.recordings["estimated full depth"] = \
Recording(
directory = base_recordings_path + "2011_01_15_76098_recordings_stixel_world_to_use_with_two_steps_uses_full_depth_map_and_enforced_heights",
frontmost_bboxes_only = False)
# two_steps full_depth enforced_height all_bboxes
options.recordings["estimated partial depth height50 pixels12"] = \
Recording(
directory = base_recordings_path + "2011_01_18_4371_recordings_stixel_world_to_use_with_two_steps_uses_partial_depth_map_with_pixels_12_enforced_heights_with_residual_ground_with_1.1_ratio_with_height50",
frontmost_bboxes_only = False)
options.recordings["estimated partial depth height50 pixels20"] = \
Recording(
directory = base_recordings_path + "2011_01_18_7563_recordings_stixel_world_to_use_with_two_steps_uses_partial_depth_map_with_pixels_20_enforced_heights_with_residual_ground_with_1to1_ratio_with_height50",
frontmost_bboxes_only = False)
options.recordings["estimated full depth height50 pixels20"] = \
Recording(
directory = base_recordings_path + "2011_01_18_7843_recordings_stixel_world_to_use_with_two_steps_uses_full_depth_map_with_pixels_20_enforced_heights_with_residual_ground_with_1to1_ratio",
frontmost_bboxes_only = False)
options.recordings["fixed height50"] = \
Recording(
directory = base_recordings_path + "2011_01_18_16836_recordings_stixel_world_to_use_with_fixed_with_residual_ground_with_1to1_ratio_with_height50",
frontmost_bboxes_only = False)
if False:
# final plot of rss2011 submission
options.recordings["rss2011 estimated height, partial depth"] = \
Recording(
directory = rss2011_recordings_path + "2011_01_18_7563_recordings_stixel_world_to_use_with_two_steps_uses_partial_depth_map_with_pixels_20_enforced_heights_with_residual_ground_with_1to1_ratio_with_height50",
frontmost_bboxes_only = False)
options.recordings["rss2011 estimated height, full depth"] = \
Recording(
directory = rss2011_recordings_path + "2011_01_18_7843_recordings_stixel_world_to_use_with_two_steps_uses_full_depth_map_with_pixels_20_enforced_heights_with_residual_ground_with_1to1_ratio",
frontmost_bboxes_only = False)
options.recordings["rss2011 fixed height"] = \
Recording(
directory = rss2011_recordings_path + "2011_01_18_16836_recordings_stixel_world_to_use_with_fixed_with_residual_ground_with_1to1_ratio_with_height50",
frontmost_bboxes_only = False)
if False:
# a few baseline methods
options.recordings["rss2011 fixed initial_results all_bboxes 1.8m 50px"] = \
Recording(
directory = rss2011_recordings_path + "2011_01_05_67521_recordings_first_results",
frontmost_bboxes_only = False)
options.recordings["baseline method"] = \
Recording(
directory = iros2011_recordings_path + "2011_02_24_61080_recordings_baseline",
frontmost_bboxes_only = False)
if False:
# iros2011 experiments
options.recordings["with_ground_cost_miroring_high_cost_weight"] = \
Recording(
directory = iros2011_recordings_path + "2011_02_24_59621_recordings_with_ground_cost_miroring_high_cost_weight",
frontmost_bboxes_only = False)
options.recordings["without_ground_cost_miroring_high_cost_weight"] = \
Recording(
directory = iros2011_recordings_path + "2011_02_24_60410_recordings_without_ground_cost_miroring_high_cost_weight",
frontmost_bboxes_only = False)
options.recordings["with_ground_cost_miroring_normal_cost_weight"] = \
Recording(
directory = iros2011_recordings_path + "2011_02_24_64214_recordings_with_ground_cost_miroring_normal_cost_weight",
frontmost_bboxes_only = False)
options.recordings["without_ground_cost"] = \
Recording(
directory = iros2011_recordings_path + "2011_02_25_35465_recordings_without_ground_cost",
frontmost_bboxes_only = False)
options.recordings["ground_cost_threshold_0.3"] = \
Recording(
directory = iros2011_recordings_path + "2011_02_25_41072_recordings_with_ground_cost_threshold_0.3",
frontmost_bboxes_only = False)
options.recordings["u_disparity_boundary_diagonal_weight_0.0"] = \
Recording(
directory = iros2011_recordings_path + "2011_02_25_50355_recordings_u_disparity_boundary_diagonal_weight_0.0",
frontmost_bboxes_only = False)
if False:
# more iros2011 experiments
options.recordings["2011_03_09_no_enforcement"] = \
Recording(
directory = iros2011_recordings_path + "2011_03_10_38201_recordings_no_enforcement",
frontmost_bboxes_only = False)
if False:
# plots of iros2011 submission
#options.recordings["2011_03_09_fixed"] = \
options.recordings["fixed height ours"] = \
Recording(
directory = iros2011_recordings_path + "2011_03_09_70217_recordings_fixed",
frontmost_bboxes_only = False)
#options.recordings["2011_03_09_two_steps"] = \
options.recordings["estimated height ours"] = \
Recording(
directory = iros2011_recordings_path + "2011_03_09_72241_recordings_two_steps",
frontmost_bboxes_only = False)
if True and False:
# plots of iccv2011 workshop submission
options.recordings["fixed height csbp"] = \
Recording(
directory = iccv2011_workshop_recordings_path + "2011_07_07_62311_recordings_bahnhof_fixed_csbp",
frontmost_bboxes_only = False)
options.recordings["estimated height csbp"] = \
Recording(
directory = iccv2011_workshop_recordings_path + "2011_07_07_54576_recordings_bahnhof_two_steps_csbp",
frontmost_bboxes_only = False)
options.recordings["fixed height simple sad"] = \
Recording(
directory = iccv2011_workshop_recordings_path + "2011_07_07_60542_recordings_bahnhof_fixed_simple_sad_w_9",
frontmost_bboxes_only = False)
options.recordings["estimated height simple sad"] = \
Recording(
directory = iccv2011_workshop_recordings_path + "2011_07_07_57673_recordings_bahnhof_two_steps_simple_sad_w_9",
frontmost_bboxes_only = False)
if False:
options.recordings["estimated height csbp no constraints"] = \
Recording(
directory = iccv2011_workshop_recordings_path + "2011_07_07_68630_recordings_bahnhof_two_steps_csbp_no_constraints_no_filtering",
frontmost_bboxes_only = False)
options.recordings["estimated height csbp no filtering"] = \
Recording(
directory = iccv2011_workshop_recordings_path + "2011_07_08_45509_recordings_bahnhof_two_steps_csbp_no_filtering",
frontmost_bboxes_only = False)
options.recordings["estimated height simple sad no filtering"] = \
Recording(
directory = iccv2011_workshop_recordings_path + "2011_07_08_50800_recordings_bahnhof_two_steps_simple_wad_w_9_no_filtering",
frontmost_bboxes_only = False)
if False:
options.recordings["estimated height gpu_trees no filtering"] = \
Recording(
directory = iccv2011_workshop_recordings_path + "2011_07_12_67071_recordings_bahnhof_two_steps_gpu_trees",
frontmost_bboxes_only = False)
options.recordings["estimated height opencv sad gray"] = \
Recording(
directory = iccv2011_workshop_recordings_path + "2011_07_13_72596_recordings_bahnhof_two_steps_opencv_sad_gray",
frontmost_bboxes_only = False)
if False:
# ground filtering seems to provide only a very small improvement over no filtering
options.recordings["estimated height fast ground filtering"] = \
Recording(
directory = base_recordings_path + "2011_07_20_52297_recordings_fast_stixels_with_ground_filtering",
frontmost_bboxes_only = False)
options.recordings["estimated height fast ground filtering height smoothing"] = \
Recording(
directory = base_recordings_path + "2011_07_20_66376_recordings_fast_stixels_with_ground_filtering_with_height_cost_smoothing",
frontmost_bboxes_only = False)
if False:
options.recordings["estimated height 3 pixels wide"] = \
Recording(
directory = base_recordings_path + "2011_07_19_66013_recordings_3_pixels_stixels",
frontmost_bboxes_only = False)
options.recordings["estimated height fast"] = \
Recording(
directory = base_recordings_path + "2011_07_20_67563_recordings_fast_stixels",
frontmost_bboxes_only = False)
options.recordings["estimated height fast residual ground"] = \
Recording(
directory = base_recordings_path + "2011_07_20_69135_recordings_fast_residual_ground",
frontmost_bboxes_only = False)
options.recordings["estimated height residual ground"] = \
Recording(
directory = base_recordings_path + "2011_07_20_71630_recordings_residual_ground",
frontmost_bboxes_only = False)
#options.recordings["estimated height fast residual ground cost volume margin"] = \
# Recording(
# directory = base_recordings_path + "2011_07_21_48014_recordings_fast_ground_residual_cost_volume_margin",
# frontmost_bboxes_only = False)
options.recordings["estimated height not fast residual ground cost volume margin"] = \
Recording(
directory = base_recordings_path + "2011_07_21_49363_recordings_not_fast_ground_residual_cost_volume_margin",
frontmost_bboxes_only = False)
if False:
# same results as in iros2011 submission 2011_03_09_72241_recordings_two_steps
options.recordings["estimated height iros2011_03_09 baseline"] = \
Recording(
directory = "/users/visics/rbenenso/code/doppia_iros2011/src/applications/stixel_world/" \
+ "2011_07_21_54447_recordings_iros2011_03_09_baseline",
frontmost_bboxes_only = False)
if False:
# only for the histogram
options.recordings["fixed"] = \
Recording(
directory = base_recordings_path + "2011_01_17_70193_recordings_stixel_world_to_use_with_fixed_height_with_no_post_filtering_and_residual_ground_WITH_BUG",
frontmost_bboxes_only = False)
options.recordings["estimated"] = \
Recording(
directory = base_recordings_path + "2011_01_17_85243_recordings_stixel_world_to_use_with_two_steps_uses_partial_depth_map_with_pixels_12_enforced_heights_with_residual_ground_with_1to1_ratio",
frontmost_bboxes_only = False)
# 2011_11_18_55646_recordings_vanilla_gpu_very_fast_over_bahnhof
if False:
# plots of eccv2012 workshop submission,
# varying stixel width
options.recordings["stixel width 1"] = \
Recording(
directory = eccv2012_workshop_recordings_path + "2012_07_06_39132_recordings_support_1_width_1_row_steps_128",
frontmost_bboxes_only = False)
#options.recordings["stixel width 2"] = \
# Recording(
# directory = eccv2012_workshop_recordings_path + "2012_07_06_43330_recordings_support_1_width_2_row_steps_128",
# frontmost_bboxes_only = False)
options.recordings["stixel width 3"] = \
Recording(
directory = eccv2012_workshop_recordings_path + "2012_07_06_40293_recordings_support_1_width_3_row_steps_128",
frontmost_bboxes_only = False)
#options.recordings["stixel width 4"] = \
# Recording(
# directory = eccv2012_workshop_recordings_path + "2012_07_06_42336_recordings_support_1_width_4_row_steps_128",
# frontmost_bboxes_only = False)
options.recordings["stixel width 5"] = \
Recording(
directory = eccv2012_workshop_recordings_path + "2012_07_06_41631_recordings_support_1_width_5_row_steps_128",
frontmost_bboxes_only = False)
options.recordings["stixel width 7"] = \
Recording(
directory = eccv2012_workshop_recordings_path + "2012_07_06_42210_recordings_support_1_width_7_row_steps_128",
frontmost_bboxes_only = False)
options.recordings["stixel width 11"] = \
Recording(
directory = eccv2012_workshop_recordings_path + "2012_07_06_41779_recordings_support_1_width_11_row_steps_128",
frontmost_bboxes_only = False)
options.recordings["stixel width 21"] = \
Recording(
directory = eccv2012_workshop_recordings_path + "2012_07_06_41994_recordings_support_1_width_21_row_steps_128",
frontmost_bboxes_only = False)
options.recordings["stixel v1"] = \
Recording(
directory = eccv2012_workshop_recordings_path + "2012_07_06_42473_recordings_stixels_v1_max_disparity_128",
frontmost_bboxes_only = False)
#options.recordings["stixel v1 (d = 80)"] = \
# Recording(
# directory = eccv2012_workshop_recordings_path + "2012_07_06_42598_recordings_stixels_v1_max_disparity_80",
# frontmost_bboxes_only = False)
if True:
# plots of eccv2012 workshop submission,
# varying number of row bands
options.recordings["128 row bands"] = \
Recording(
directory = eccv2012_workshop_recordings_path + "2012_07_06_40293_recordings_support_1_width_3_row_steps_128",
frontmost_bboxes_only = False)
options.recordings["50 row bands"] = \
Recording(
directory = eccv2012_workshop_recordings_path + "2012_07_06_42764_recordings_support_1_width_3_row_steps_50",
frontmost_bboxes_only = False)
options.recordings["25 row bands"] = \
Recording(
directory = eccv2012_workshop_recordings_path + "2012_07_06_43073_recordings_support_1_width_3_row_steps_25",
frontmost_bboxes_only = False)
options.recordings["15 row bands"] = \
Recording(
directory = eccv2012_workshop_recordings_path + "2012_07_06_43185_recordings_support_1_width_3_row_steps_15",
frontmost_bboxes_only = False)
options.recordings["10 row bands"] = \
Recording(
directory = eccv2012_workshop_recordings_path + "2012_07_06_43824_recordings_support_1_width_3_row_steps_10",
frontmost_bboxes_only = False)
options.recordings["5 row bands"] = \
Recording(
directory = eccv2012_workshop_recordings_path + "2012_07_06_43431_recordings_support_1_width_3_row_steps_5",
frontmost_bboxes_only = False)
if False:
# plots of eccv2012 workshop submission,
# varying stixel support width
options.recordings["support width 1"] = \
Recording(
directory = eccv2012_workshop_recordings_path + "2012_07_06_43073_recordings_support_1_width_3_row_steps_25",
frontmost_bboxes_only = False)
options.recordings["support width 2"] = \
Recording(
directory = eccv2012_workshop_recordings_path + "2012_07_06_44175_recordings_support_2_width_3_row_steps_25",
frontmost_bboxes_only = False)
options.recordings["support width 3"] = \
Recording(
directory = eccv2012_workshop_recordings_path + "2012_07_06_44365_recordings_support_3_width_3_row_steps_25",
frontmost_bboxes_only = False)
options.recordings["support width 5"] = \
Recording(
directory = eccv2012_workshop_recordings_path + "2012_07_06_44444_recordings_support_5_width_3_row_steps_25",
frontmost_bboxes_only = False)
options.recordings["support width 10"] = \
Recording(
directory = eccv2012_workshop_recordings_path + "2012_07_06_44644_recordings_support_10_width_3_row_steps_25",
frontmost_bboxes_only = False)
options.max_num_frames = -1
#options.max_num_frames = 5 # just for debugging
#options.max_num_frames = 900
options.minimum_box_height = 0
def render_paper_graphs():
"""
Render the final graphs for RSS2011/ICCV2011 workshop/ECCV2012 workshop paper
"""
application = StixelsEvaluationApplication()
application.run(options)
return
if __name__ == '__main__':
# Import Psyco if available
try:
import psyco
psyco.full()
except ImportError:
#print("(psyco not found)")
pass
else:
print("(using psyco)")
render_paper_graphs()
|
|
#!/usr/bin/python
import sys, time, os, signal, imp, argparse, logging, logging.handlers, traceback
class Parsible(object):
def import_plugins(self):
# Initialize our lists
self.processors = []
# Map our directory names to the prefixes on methods we want to check out.
plugin_mappings = {
'parsers' : 'parse',
'processors' : 'process'
}
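# For example, a hypothetical plugins/parsers/nginx.py defining
#     def parse_nginx(line):
#         return line.split()
# would be picked up below, because its name starts with the 'parse'
# prefix mapped to the 'parsers' directory.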
# Figure out where we are and start looking for plugins
current_file = os.path.abspath(__file__)
current_directory = os.path.abspath(os.path.join(current_file, os.path.pardir))
plugins_directory = current_directory + "/plugins"
# Iterate through our potential plugin locations so we can import everything
# IMPORTANT: Without this block we can't use the buzzword 'Autodiscover', very necessary
for plugin_type in plugin_mappings.keys():
directory = plugins_directory + "/" + plugin_type
for f in os.listdir(directory):
if f.endswith(".py") and not f.startswith("_"):
# Get the name of the file for importing
parser_file = f.rpartition(".")[0]
# Import the file so we can inspect the methods inside
__import__('plugins.%s.%s' % (plugin_type, parser_file))
for method in dir(sys.modules["plugins.%s.%s" % (plugin_type, parser_file)]):
# Filter down to methods that have the appropriate prefix
if method.startswith(plugin_mappings[plugin_type]):
_temp = __import__('plugins.%s.%s' % (plugin_type, parser_file), globals(), locals(), [method], -1)
# Figure out what we should do with our newly discovered methods
if "parse" in plugin_mappings[plugin_type]:
if self.parser is not None:
if method == self.parser:
setattr(self, "parsing_function", getattr(_temp, method))
else:
# Set the first parser we find, overwrite this later if we find one specified
setattr(self, "parsing_function", getattr(_temp, method))
# Construct our list of processing functions that we will call on each parsed line
elif "process" in plugin_mappings[plugin_type]:
self.processors.append(getattr(_temp, method))
def set_logging(self):
self.logger = logging.getLogger('parsible')
handler = logging.handlers.RotatingFileHandler('../logs/parsinator.log', mode='a', maxBytes=2*10**7, backupCount=1)
handler.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s - %(message)s'))
self.logger.addHandler(handler)
if self.debug:
self.logger.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.INFO)
self.logger.info("logging initialized")
self.logger.debug("logging set to DEBUG")
def __init__(self, input_file, parser, pid_file, debug, batch, auto_reload):
self.debug = debug
self.batch = batch
self.auto_reload = auto_reload
self.set_logging()
# Some messy business to import unknown files at runtime, cool stuff inside
self.parser = parser
self.import_plugins()
# Keep internal references to these so we can change and refresh them properly
self.input_file = input_file
self.pid_file = pid_file
def parsible_exit(self, status):
os.remove(self.pid_file)
sys.exit(status)
def load_file(self):
try:
self.log_file = open(self.input_file)
except IOError:
print "Unable to open log file"
self.parsible_exit(1)
def reload_file(self, signum, frame):
self.log_file.close()
self.load_file()
return
def reload_file_if_changed(self):
# Get the Inode for our current file
loaded_file_inode = os.fstat(self.log_file.fileno()).st_ino
# Check the inode of the file path that was specified
current_file = open(self.input_file)
current_file_inode = os.fstat(current_file.fileno()).st_ino
current_file.close()
# Reload if there is a discrepancy
if loaded_file_inode != current_file_inode:
self.reload_file(None, None)
self.logger.debug('Log File Changed, Reloading...')
else:
self.logger.debug('Log File Unchanged')
return
def _get_current_byte_position(self):
return self.log_file.tell()
def _get_file_byte_length(self):
return os.path.getsize(self.input_file)
def _run_periodic_functions(self):
"""
Functions that need to be run periodically for system statistics,
such as logging progress through the current file.
"""
current = self._get_current_byte_position()
size = self._get_file_byte_length()
percent = ( float(current) / float(size) ) * 100
self.logger.error('File Statistics: Current Byte Location {current}'.format(current=current))
self.logger.error('File Statistics: Current File Byte Size {size}'.format(size=size))
self.logger.error('File Statistics: Processed Percentage {percent:.2f} %'.format(percent=percent))
def set_pid_file(self):
# All this to set up a PID file
f = open(self.pid_file, 'w')
PID = str(os.getpid())
f.write(PID)
f.flush()
f.close()
# Set up a callback for SIGUSR1 (kill -USR1; the numeric signal value varies by platform)
signal.signal(signal.SIGUSR1, self.reload_file)
def follow(self):
# Shamelessly drafted from http://www.dabeaz.com/generators/Generators.pdf
empty_iterations = 0
tick = 0
if not self.batch:
# Go to the end of the file for tailing, otherwise we start at the beginning
self.log_file.seek(0,2)
while True:
# Get our latest line, or an empty string if nothing new is available
line = self.log_file.readline()
if not line:
if self.batch:
self.logger.debug('Ending Batch Run')
raise StopIteration
#if self.debug:
#self.logger.debug('Tick Tock, waited for {} iterations'.format(empty_iterations))
# Essentially spinlock on our logfile waiting for updates to become available
# Depending on update speed this iteration time can be decreased
empty_iterations += 1
time.sleep(0.1)
if self.auto_reload:
# If waiting for new lines, check once per 10 seconds to reload
if empty_iterations > 100:
empty_iterations = 0
self.reload_file_if_changed()
continue
empty_iterations = 0
tick += 1
tick = tick % 100
if tick == 0:
# Check every 100 iterations
self._run_periodic_functions()
# Yield so we can be called as a generator, decoupling the waiting issues.
# Our parsing function can be evaluated later
yield self.parsing_function(line)
def run_processors(self, line):
for process in self.processors:
try:
process(line)
except Exception, e:
# We can add some custom logic if needed, such as counting how many lines have issues
# For now we just swallow errors, since the spice must flow, err parsing must continue.
if self.debug:
self.logger.debug(e)
traceback.print_exc(file=sys.stdout)
continue
def main(self):
try:
# Being a good UNIX Citizen
self.set_pid_file()
self.load_file()
# Get our Generator Reference
parsed_log_file = self.follow()
# Abstract all the messy generator logic away into a simple for-each
for parsed_line in parsed_log_file:
# The processors should take care of outputting data as they see fit
if self.debug:
self.logger.debug(parsed_line)
self.run_processors(parsed_line)
# We probably will never reach here, but it pays to be tidy just in case we change code in the future
self.log_file.close()
self.parsible_exit(0)
except Exception as e:
# for certain we will reach here at some point!
self.logger.error("caught exception, shutting down")
self.logger.error(e)
self.log_file.close()
self.parsible_exit(0)
if __name__ == '__main__':
# Just setting up command line arguments.
# Only thing interesting here is the defaults set for some options. You can skip this trying to get to the meat.
cmdline = argparse.ArgumentParser(usage="usage: parsible.py --log-file /var/log/mylog [options]",
description="Tail a log file and filter each line to generate metrics that can be output to any desired endpoint.")
cmdline.add_argument('--log-file',
'-l',
action='store',
help='The absolute path to the log file to be parsed, Ex: /var/log/mylog',
dest='input_file',
required=True
)
cmdline.add_argument('--parser',
'-p',
action='store',
help='Name of the parsing method to use, should start with "parse_", Ex: parse_nginx. If this is not set, Parsible will use the first parser found.',
dest='parser',
default=None
)
cmdline.add_argument('--pid-file',
'-f',
action='store',
help='Absolute path to use for the PID file, Ex: /tmp/parsible.pid',
dest='pid_file',
default='/tmp/parsible.pid'
)
cmdline.add_argument('--debug',
'-d',
action='store_true',
help='Enable Debugging',
dest='debug',
default=False
)
cmdline.add_argument('--batch-mode',
'-b',
action='store_true',
help='If Set, Parsible will start at the top of the log file and exit once it reaches the end. Useful for processing logs that are not realtime',
dest='batch',
default=False
)
cmdline.add_argument('--auto-reload',
'-a',
action='store_true',
help='If Set, when receiving empty lines Parsible will check if there is a discrepancy between the stored and existing file descriptors for the log file. If a discrepancy is found, Parsible will reload the new file.',
dest='auto_reload',
default=False
)
args = cmdline.parse_args()
p = Parsible(args.input_file, args.parser, args.pid_file, args.debug, args.batch, args.auto_reload)
p.main()
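# --- Hedged aside (not part of Parsible) --------------------------------------
# follow() and reload_file_if_changed() above combine two techniques: tailing a
# file with a generator, and detecting log rotation by comparing the inode of
# the open handle against the inode currently at the path. Below is a minimal
# standalone sketch of the same idea; the helper name is ours, the path
# handling is simplified, and it relies on the os and time imports already
# used by this script.
def follow_with_rotation_check(path, poll_interval=0.1, polls_per_check=100):
    """Yield new lines from path, reopening the file if its inode changes."""
    handle = open(path)
    handle.seek(0, 2)  # jump to the end, like tail -f
    idle_polls = 0
    while True:
        line = handle.readline()
        if not line:
            idle_polls += 1
            time.sleep(poll_interval)
            if idle_polls >= polls_per_check:
                idle_polls = 0
                if os.fstat(handle.fileno()).st_ino != os.stat(path).st_ino:
                    handle.close()
                    handle = open(path)  # the path now points at a rotated file
            continue
        idle_polls = 0
        yield line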
|
|
from __future__ import absolute_import, division, print_function
from collections import Iterator, Mapping
import itertools
import datashape
from datashape import discover, Tuple, Record, DataShape, var
from datashape.predicates import (
istabular,
isscalar,
isrecord,
)
from odo import resource
from odo.utils import ignoring, copydoc
from ..compatibility import _strtypes
from ..dispatch import dispatch
from .expressions import sanitized_dshape, Symbol
__all__ = ['BoundSymbol', 'Literal', 'Data', 'literal', 'data']
_names = ('_%d' % i for i in itertools.count(1))
not_an_iterator = []
with ignoring(ImportError):
import bcolz
not_an_iterator.append(bcolz.carray)
with ignoring(ImportError):
import pymongo
not_an_iterator.append(pymongo.collection.Collection)
not_an_iterator.append(pymongo.database.Database)
class generate(object):
"""A sentinel value, indicating whether or not `literal` should
generate a name for the returned `BoundSymbol`.
"""
def __new__(cls):
raise NotImplementedError('Can not create instance of sentinel type.')
class BoundSymbol(Symbol):
# NOTE: This docstring is meant to correspond to the ``literal()`` and
# ``data()`` APIs, which is why the Parameters section doesn't match the
# arguments to ``Literal.__new__()``.
"""Bind a data resource to a symbol, for use in expressions and
computation.
A ``data`` object presents a consistent view onto a variety of concrete
data sources. Like ``symbol`` objects, they are meant to be used in
expressions. Because they are tied to concrete data resources, ``data``
objects can be used with ``compute`` directly, making them convenient for
interactive exploration.
Parameters
----------
data_source : object
Any type with ``discover`` and ``compute`` implementations
fields : list, optional
Field or column names, will be inferred from data_source if possible
dshape : str or DataShape, optional
DataShape describing input data
name : str, optional
A name for the data.
Examples
--------
>>> t = data([(1, 'Alice', 100),
... (2, 'Bob', -200),
... (3, 'Charlie', 300),
... (4, 'Denis', 400),
... (5, 'Edith', -500)],
... fields=['id', 'name', 'balance'])
>>> t[t.balance < 0].name.peek()
name
0 Bob
1 Edith
"""
_arguments = 'data', 'dshape', '_name'
def _resources(self):
return {self: self.data}
@property
def _token(self):
return 0
@classmethod
def _static_identity(cls, data, dshape, _name):
try:
# cannot use isinstance(data, Hashable)
# some classes give a false positive
hash(data)
except TypeError:
data = id(data)
return cls, data, dshape, _name
def __str__(self):
name = self._name
return name if name is not None else repr(self)
class Literal(BoundSymbol):
def __repr__(self):
name = self._name
return name if name is not None else repr(self.data)
class Data(BoundSymbol):
def __repr__(self):
return "<'{}' data; _name='{}', dshape='{}'>".format(
type(self.data).__name__,
self._name,
sanitized_dshape(self.dshape),
)
def _bound_symbol(cls,
data_source,
dshape,
name,
fields,
schema,
**kwargs):
if schema and dshape:
raise ValueError(
'Please specify only one of the schema= and dshape= keyword arguments',
)
if isinstance(data_source, BoundSymbol):
return _bound_symbol(
cls,
data_source.data,
dshape,
name,
fields,
schema,
**kwargs
)
if schema and not dshape:
dshape = var * schema
if dshape and isinstance(dshape, _strtypes):
dshape = datashape.dshape(dshape)
if isinstance(data_source, _strtypes):
data_source = resource(
data_source,
schema=schema,
dshape=dshape,
**kwargs
)
if (isinstance(data_source, Iterator) and
not isinstance(data_source, tuple(not_an_iterator))):
data_source = tuple(data_source)
if not dshape:
dshape = discover(data_source)
types = None
if isinstance(dshape.measure, Tuple) and fields:
types = dshape[1].dshapes
schema = Record(list(zip(fields, types)))
dshape = DataShape(*(dshape.shape + (schema,)))
elif isscalar(dshape.measure) and fields:
types = (dshape.measure,) * int(dshape[-2])
schema = Record(list(zip(fields, types)))
dshape = DataShape(*(dshape.shape[:-1] + (schema,)))
elif isrecord(dshape.measure) and fields:
ds = discover(data_source)
assert isrecord(ds.measure)
names = ds.measure.names
if names != fields:
raise ValueError(
'data column names %s\n'
'\tnot equal to fields parameter %s,\n'
'\tuse data(data_source).relabel(%s) to rename '
'fields' % (
names,
fields,
', '.join(
'%s=%r' % (k, v)
for k, v in
zip(names, fields)
),
),
)
types = dshape.measure.types
schema = Record(list(zip(fields, types)))
dshape = DataShape(*(dshape.shape + (schema,)))
ds = datashape.dshape(dshape)
if name is generate:
if istabular(dshape):
name = next(_names)
else:
name = None
return cls(data_source, ds, name)
@copydoc(BoundSymbol)
def literal(data_source,
dshape=None,
name=None,
fields=None,
schema=None,
**kwargs):
return _bound_symbol(
Literal,
data_source,
dshape=dshape,
name=name,
fields=fields,
schema=schema,
**kwargs
)
@copydoc(BoundSymbol)
def data(data_source,
dshape=None,
name=generate,
fields=None,
schema=None,
**kwargs):
return _bound_symbol(
Data,
data_source,
dshape=dshape,
name=name,
fields=fields,
schema=schema,
**kwargs
)
@dispatch(BoundSymbol, Mapping)
def _subs(o, d):
return o
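# Hedged aside (not part of blaze): the comment in BoundSymbol._static_identity
# notes that isinstance(data, Hashable) can report false positives, so the code
# simply attempts hash() and falls back to id(). The same fallback in isolation
# (the helper name is ours):
def _hashable_or_id(obj):
    """Return obj if it is truly hashable, otherwise its id()."""
    try:
        hash(obj)
    except TypeError:
        return id(obj)
    return obj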
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Nimbis Services, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: htpasswd
version_added: "1.3"
short_description: manage user files for basic authentication
description:
- Add and remove username/password entries in a password file using htpasswd.
- This is used by web servers such as Apache and Nginx for basic authentication.
options:
path:
required: true
aliases: [ dest, destfile ]
description:
- Path to the file that contains the usernames and passwords
name:
required: true
aliases: [ username ]
description:
- User name to add or remove
password:
required: false
description:
- Password associated with user.
- Must be specified if user does not exist yet.
crypt_scheme:
required: false
choices: ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
default: "apr_md5_crypt"
description:
- Encryption scheme to be used. As well as the four choices listed
here, you can also use any other hash supported by passlib, such as
md5_crypt and sha256_crypt, which are Linux passwd hashes. If you
do so, the password file will not be compatible with Apache or Nginx.
state:
required: false
choices: [ present, absent ]
default: "present"
description:
- Whether the user entry should be present or not
create:
required: false
choices: [ "yes", "no" ]
default: "yes"
description:
- Used with C(state=present). If specified, the file will be created
if it does not already exist. If set to "no", will fail if the
file does not exist.
notes:
- "This module depends on the I(passlib) Python library, which needs to be installed on all target systems."
- "On Debian, Ubuntu, or Fedora: install I(python-passlib)."
- "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)."
requirements: [ passlib>=1.6 ]
author: "Ansible Core Team"
"""
EXAMPLES = """
# Add a user to a password file and ensure permissions are set
- htpasswd:
path: /etc/nginx/passwdfile
name: janedoe
password: '9s36?;fyNp'
owner: root
group: www-data
mode: 0640
# Remove a user from a password file
- htpasswd:
path: /etc/apache2/passwdfile
name: foobar
state: absent
# Add a user to a password file suitable for use by libpam-pwdfile
- htpasswd:
path: /etc/mail/passwords
name: alex
password: oedu2eGh
crypt_scheme: md5_crypt
"""
import os
import tempfile
from distutils.version import LooseVersion
try:
from passlib.apache import HtpasswdFile, htpasswd_context
from passlib.context import CryptContext
import passlib
except ImportError:
passlib_installed = False
else:
passlib_installed = True
apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
def create_missing_directories(dest):
destpath = os.path.dirname(dest)
if not os.path.exists(destpath):
os.makedirs(destpath)
def present(dest, username, password, crypt_scheme, create, check_mode):
""" Ensures user is present
Returns (msg, changed) """
if crypt_scheme in apache_hashes:
context = htpasswd_context
else:
context = CryptContext(schemes=[crypt_scheme] + apache_hashes)
if not os.path.exists(dest):
if not create:
raise ValueError('Destination %s does not exist' % dest)
if check_mode:
return ("Create %s" % dest, True)
create_missing_directories(dest)
if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context)
else:
ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context)
if getattr(ht, 'set_password', None):
ht.set_password(username, password)
else:
ht.update(username, password)
ht.save()
return ("Created %s and added %s" % (dest, username), True)
else:
if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context)
else:
ht = HtpasswdFile(dest, default=crypt_scheme, context=context)
found = None
if getattr(ht, 'check_password', None):
found = ht.check_password(username, password)
else:
found = ht.verify(username, password)
if found:
return ("%s already present" % username, False)
else:
if not check_mode:
if getattr(ht, 'set_password', None):
ht.set_password(username, password)
else:
ht.update(username, password)
ht.save()
return ("Add/update %s" % username, True)
def absent(dest, username, check_mode):
""" Ensures user is absent
Returns (msg, changed) """
if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
ht = HtpasswdFile(dest, new=False)
else:
ht = HtpasswdFile(dest)
if username not in ht.users():
return ("%s not present" % username, False)
else:
if not check_mode:
ht.delete(username)
ht.save()
return ("Remove %s" % username, True)
def check_file_attrs(module, changed, message):
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
if changed:
message += " and "
changed = True
message += "ownership, perms or SE linux context changed"
return message, changed
def main():
arg_spec = dict(
path=dict(required=True, aliases=["dest", "destfile"]),
name=dict(required=True, aliases=["username"]),
password=dict(required=False, default=None, no_log=True),
crypt_scheme=dict(required=False, default="apr_md5_crypt"),
state=dict(required=False, default="present"),
create=dict(type='bool', default='yes'),
)
module = AnsibleModule(argument_spec=arg_spec,
add_file_common_args=True,
supports_check_mode=True)
path = module.params['path']
username = module.params['name']
password = module.params['password']
crypt_scheme = module.params['crypt_scheme']
state = module.params['state']
create = module.params['create']
check_mode = module.check_mode
if not passlib_installed:
module.fail_json(msg="This module requires the passlib Python library")
# Check file for blank lines in effort to avoid "need more than 1 value to unpack" error.
try:
f = open(path, "r")
except IOError:
# No preexisting file to remove blank lines from
f = None
else:
try:
lines = f.readlines()
finally:
f.close()
# Rewriting the file counts as a change, so only rewrite it if it actually has blank lines
strip = False
for line in lines:
if not line.strip():
strip = True
break
if strip:
# If check mode, create a temporary file
if check_mode:
temp = tempfile.NamedTemporaryFile()
path = temp.name
f = open(path, "w")
try:
f.writelines(line for line in lines if line.strip())
finally:
f.close()
try:
if state == 'present':
(msg, changed) = present(path, username, password, crypt_scheme, create, check_mode)
elif state == 'absent':
if not os.path.exists(path):
module.exit_json(msg="%s not present" % username,
warnings="%s does not exist" % path, changed=False)
(msg, changed) = absent(path, username, check_mode)
else:
module.fail_json(msg="Invalid state: %s" % state)
check_file_attrs(module, changed, msg)
module.exit_json(msg=msg, changed=changed)
except Exception:
e = get_exception()
module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
if __name__ == '__main__':
main()
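# Hedged aside (not part of the module): present() and absent() above are thin
# wrappers around passlib's HtpasswdFile API. Used directly, outside Ansible,
# the same calls look roughly like this; the path and credentials are made up.
def _passlib_sketch(path='/tmp/example.htpasswd'):
    from passlib.apache import HtpasswdFile
    ht = HtpasswdFile(path, new=True, default_scheme='apr_md5_crypt')
    ht.set_password('janedoe', 's3cret')
    ht.save()
    return ht.check_password('janedoe', 's3cret')  # True on success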
|
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import unittest
from copy import deepcopy
from airflow import configuration
from airflow.exceptions import AirflowException
from airflow.contrib.operators.ecs_operator import ECSOperator
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
RESPONSE_WITHOUT_FAILURES = {
"failures": [],
"tasks": [
{
"containers": [
{
"containerArn": "arn:aws:ecs:us-east-1:012345678910:container/e1ed7aac-d9b2-4315-8726-d2432bf11868",
"lastStatus": "PENDING",
"name": "wordpress",
"taskArn": "arn:aws:ecs:us-east-1:012345678910:task/d8c67b3c-ac87-4ffe-a847-4785bc3a8b55"
}
],
"desiredStatus": "RUNNING",
"lastStatus": "PENDING",
"taskArn": "arn:aws:ecs:us-east-1:012345678910:task/d8c67b3c-ac87-4ffe-a847-4785bc3a8b55",
"taskDefinitionArn": "arn:aws:ecs:us-east-1:012345678910:task-definition/hello_world:11"
}
]
}
class TestECSOperator(unittest.TestCase):
@mock.patch('airflow.contrib.operators.ecs_operator.AwsHook')
def setUp(self, aws_hook_mock):
configuration.load_test_config()
self.aws_hook_mock = aws_hook_mock
self.ecs = ECSOperator(
task_id='task',
task_definition='t',
cluster='c',
overrides={},
aws_conn_id=None,
region_name='eu-west-1')
def test_init(self):
self.assertEqual(self.ecs.region_name, 'eu-west-1')
self.assertEqual(self.ecs.task_definition, 't')
self.assertEqual(self.ecs.aws_conn_id, None)
self.assertEqual(self.ecs.cluster, 'c')
self.assertEqual(self.ecs.overrides, {})
self.assertEqual(self.ecs.hook, self.aws_hook_mock.return_value)
self.aws_hook_mock.assert_called_once_with(aws_conn_id=None)
def test_template_fields_overrides(self):
self.assertEqual(self.ecs.template_fields, ('overrides',))
@mock.patch.object(ECSOperator, '_wait_for_task_ended')
@mock.patch.object(ECSOperator, '_check_success_task')
def test_execute_without_failures(self, check_mock, wait_mock):
client_mock = self.aws_hook_mock.return_value.get_client_type.return_value
client_mock.run_task.return_value = RESPONSE_WITHOUT_FAILURES
self.ecs.execute(None)
self.aws_hook_mock.return_value.get_client_type.assert_called_once_with('ecs', region_name='eu-west-1')
client_mock.run_task.assert_called_once_with(
cluster='c',
overrides={},
startedBy=mock.ANY, # Can be 'airflow' or 'Airflow'
taskDefinition='t'
)
wait_mock.assert_called_once_with()
check_mock.assert_called_once_with()
self.assertEqual(self.ecs.arn, 'arn:aws:ecs:us-east-1:012345678910:task/d8c67b3c-ac87-4ffe-a847-4785bc3a8b55')
def test_execute_with_failures(self):
client_mock = self.aws_hook_mock.return_value.get_client_type.return_value
resp_failures = deepcopy(RESPONSE_WITHOUT_FAILURES)
resp_failures['failures'].append('dummy error')
client_mock.run_task.return_value = resp_failures
with self.assertRaises(AirflowException):
self.ecs.execute(None)
self.aws_hook_mock.return_value.get_client_type.assert_called_once_with('ecs', region_name='eu-west-1')
client_mock.run_task.assert_called_once_with(
cluster='c',
overrides={},
startedBy=mock.ANY, # Can be 'airflow' or 'Airflow'
taskDefinition='t'
)
def test_wait_end_tasks(self):
client_mock = mock.Mock()
self.ecs.arn = 'arn'
self.ecs.client = client_mock
self.ecs._wait_for_task_ended()
client_mock.get_waiter.assert_called_once_with('tasks_stopped')
client_mock.get_waiter.return_value.wait.assert_called_once_with(cluster='c', tasks=['arn'])
self.assertEqual(sys.maxsize, client_mock.get_waiter.return_value.config.max_attempts)
def test_check_success_tasks_raises(self):
client_mock = mock.Mock()
self.ecs.arn = 'arn'
self.ecs.client = client_mock
client_mock.describe_tasks.return_value = {
'tasks': [{
'containers': [{
'name': 'foo',
'lastStatus': 'STOPPED',
'exitCode': 1
}]
}]
}
with self.assertRaises(Exception) as e:
self.ecs._check_success_task()
# Ordering of str(dict) is not guaranteed.
self.assertIn("This task is not in success state ", str(e.exception))
self.assertIn("'name': 'foo'", str(e.exception))
self.assertIn("'lastStatus': 'STOPPED'", str(e.exception))
self.assertIn("'exitCode': 1", str(e.exception))
client_mock.describe_tasks.assert_called_once_with(cluster='c', tasks=['arn'])
def test_check_success_tasks_raises_pending(self):
client_mock = mock.Mock()
self.ecs.client = client_mock
self.ecs.arn = 'arn'
client_mock.describe_tasks.return_value = {
'tasks': [{
'containers': [{
'name': 'container-name',
'lastStatus': 'PENDING'
}]
}]
}
with self.assertRaises(Exception) as e:
self.ecs._check_success_task()
# Ordering of str(dict) is not guaranteed.
self.assertIn("This task is still pending ", str(e.exception))
self.assertIn("'name': 'container-name'", str(e.exception))
self.assertIn("'lastStatus': 'PENDING'", str(e.exception))
client_mock.describe_tasks.assert_called_once_with(cluster='c', tasks=['arn'])
def test_check_success_tasks_raises_multiple(self):
client_mock = mock.Mock()
self.ecs.client = client_mock
self.ecs.arn = 'arn'
client_mock.describe_tasks.return_value = {
'tasks': [{
'containers': [{
'name': 'foo',
'exitCode': 1
}, {
'name': 'bar',
'lastStatus': 'STOPPED',
'exitCode': 0
}]
}]
}
self.ecs._check_success_task()
client_mock.describe_tasks.assert_called_once_with(cluster='c', tasks=['arn'])
def test_check_success_task_not_raises(self):
client_mock = mock.Mock()
self.ecs.client = client_mock
self.ecs.arn = 'arn'
client_mock.describe_tasks.return_value = {
'tasks': [{
'containers': [{
'name': 'container-name',
'lastStatus': 'STOPPED',
'exitCode': 0
}]
}]
}
self.ecs._check_success_task()
client_mock.describe_tasks.assert_called_once_with(cluster='c', tasks=['arn'])
if __name__ == '__main__':
unittest.main()
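# Hedged aside (not part of the suite): the stacked mock.patch.object
# decorators above inject mocks bottom-up, which is why the signature of
# test_execute_without_failures lists check_mock before wait_mock. A tiny
# self-contained illustration with a throwaway class:
class _PatchOrderDemo(object):
    def first(self):
        return 1
    def second(self):
        return 2
@mock.patch.object(_PatchOrderDemo, 'first')
@mock.patch.object(_PatchOrderDemo, 'second')
def _patch_order_sketch(second_mock, first_mock):
    # The innermost decorator (patching 'second') supplies the first argument.
    return second_mock, first_mock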
|
|
from __future__ import absolute_import, unicode_literals
import logging
import os
import gobject
import pygst
pygst.require('0.10')
import gst # noqa
import gst.pbutils # noqa
import pykka
from mopidy import exceptions
from mopidy.audio import icy, utils
from mopidy.audio.constants import PlaybackState
from mopidy.audio.listener import AudioListener
from mopidy.internal import deprecation, process
logger = logging.getLogger(__name__)
# This logger is only meant for debug logging of low-level GStreamer info such
# as callbacks, events, messages and direct interaction with GStreamer such as
# set_state on a pipeline.
gst_logger = logging.getLogger('mopidy.audio.gst')
icy.register()
_GST_STATE_MAPPING = {
gst.STATE_PLAYING: PlaybackState.PLAYING,
gst.STATE_PAUSED: PlaybackState.PAUSED,
gst.STATE_NULL: PlaybackState.STOPPED}
class _Signals(object):
"""Helper for tracking gobject signal registrations"""
def __init__(self):
self._ids = {}
def connect(self, element, event, func, *args):
"""Connect a function + args to signal event on an element.
Each event may only be handled by one callback in this implementation.
"""
assert (element, event) not in self._ids
self._ids[(element, event)] = element.connect(event, func, *args)
def disconnect(self, element, event):
"""Disconnect whatever handler we have for and element+event pair.
Does nothing it the handler has already been removed.
"""
signal_id = self._ids.pop((element, event), None)
if signal_id is not None:
element.disconnect(signal_id)
def clear(self):
"""Clear all registered signal handlers."""
for element, event in self._ids.keys():
element.disconnect(self._ids.pop((element, event)))
# TODO: expose this as a property on audio?
class _Appsrc(object):
"""Helper class for dealing with appsrc based playback."""
def __init__(self):
self._signals = _Signals()
self.reset()
def reset(self):
"""Reset the helper.
Should be called whenever the source changes and we are not setting up
a new appsrc.
"""
self.prepare(None, None, None, None)
def prepare(self, caps, need_data, enough_data, seek_data):
"""Store info we will need when the appsrc element gets installed."""
self._signals.clear()
self._source = None
self._caps = caps
self._need_data_callback = need_data
self._seek_data_callback = seek_data
self._enough_data_callback = enough_data
def configure(self, source):
"""Configure the supplied source for use.
Should be called whenever we get a new appsrc.
"""
source.set_property('caps', self._caps)
source.set_property('format', b'time')
source.set_property('stream-type', b'seekable')
source.set_property('max-bytes', 1 << 20) # 1MB
source.set_property('min-percent', 50)
if self._need_data_callback:
self._signals.connect(source, 'need-data', self._on_signal,
self._need_data_callback)
if self._seek_data_callback:
self._signals.connect(source, 'seek-data', self._on_signal,
self._seek_data_callback)
if self._enough_data_callback:
self._signals.connect(source, 'enough-data', self._on_signal, None,
self._enough_data_callback)
self._source = source
def push(self, buffer_):
if self._source is None:
return False
if buffer_ is None:
gst_logger.debug('Sending appsrc end-of-stream event.')
return self._source.emit('end-of-stream') == gst.FLOW_OK
else:
return self._source.emit('push-buffer', buffer_) == gst.FLOW_OK
def _on_signal(self, element, clocktime, func):
# This shim ensures we always return true, and also papers over the fact
# that not all of the callbacks take a time argument.
if clocktime is None:
func()
else:
func(utils.clocktime_to_millisecond(clocktime))
return True
# TODO: expose this as a property on audio when #790 gets further along.
class _Outputs(gst.Bin):
def __init__(self):
gst.Bin.__init__(self, 'outputs')
self._tee = gst.element_factory_make('tee')
self.add(self._tee)
ghost_pad = gst.GhostPad('sink', self._tee.get_pad('sink'))
self.add_pad(ghost_pad)
# Add an always connected fakesink which respects the clock so the tee
# doesn't fail even if we don't have any outputs.
fakesink = gst.element_factory_make('fakesink')
fakesink.set_property('sync', True)
self._add(fakesink)
def add_output(self, description):
# XXX This only works for pipelines not in use until #790 gets done.
try:
output = gst.parse_bin_from_description(
description, ghost_unconnected_pads=True)
except gobject.GError as ex:
logger.error(
'Failed to create audio output "%s": %s', description, ex)
raise exceptions.AudioException(bytes(ex))
self._add(output)
logger.info('Audio output set to "%s"', description)
def _add(self, element):
queue = gst.element_factory_make('queue')
self.add(element)
self.add(queue)
queue.link(element)
self._tee.link(queue)
class SoftwareMixer(object):
pykka_traversable = True
def __init__(self, mixer):
self._mixer = mixer
self._element = None
self._last_volume = None
self._last_mute = None
self._signals = _Signals()
def setup(self, element, mixer_ref):
self._element = element
self._mixer.setup(mixer_ref)
def teardown(self):
self._signals.clear()
self._mixer.teardown()
def get_volume(self):
return int(round(self._element.get_property('volume') * 100))
def set_volume(self, volume):
self._element.set_property('volume', volume / 100.0)
self._mixer.trigger_volume_changed(self.get_volume())
def get_mute(self):
return self._element.get_property('mute')
def set_mute(self, mute):
self._element.set_property('mute', bool(mute))
self._mixer.trigger_mute_changed(self.get_mute())
class _Handler(object):
def __init__(self, audio):
self._audio = audio
self._element = None
self._pad = None
self._message_handler_id = None
self._event_handler_id = None
def setup_message_handling(self, element):
self._element = element
bus = element.get_bus()
bus.add_signal_watch()
self._message_handler_id = bus.connect('message', self.on_message)
def setup_event_handling(self, pad):
self._pad = pad
self._event_handler_id = pad.add_event_probe(self.on_event)
def teardown_message_handling(self):
bus = self._element.get_bus()
bus.remove_signal_watch()
bus.disconnect(self._message_handler_id)
self._message_handler_id = None
def teardown_event_handling(self):
self._pad.remove_event_probe(self._event_handler_id)
self._event_handler_id = None
def on_message(self, bus, msg):
if msg.type == gst.MESSAGE_STATE_CHANGED and msg.src == self._element:
self.on_playbin_state_changed(*msg.parse_state_changed())
elif msg.type == gst.MESSAGE_BUFFERING:
self.on_buffering(msg.parse_buffering(), msg.structure)
elif msg.type == gst.MESSAGE_EOS:
self.on_end_of_stream()
elif msg.type == gst.MESSAGE_ERROR:
self.on_error(*msg.parse_error())
elif msg.type == gst.MESSAGE_WARNING:
self.on_warning(*msg.parse_warning())
elif msg.type == gst.MESSAGE_ASYNC_DONE:
self.on_async_done()
elif msg.type == gst.MESSAGE_TAG:
self.on_tag(msg.parse_tag())
elif msg.type == gst.MESSAGE_ELEMENT:
if gst.pbutils.is_missing_plugin_message(msg):
self.on_missing_plugin(msg)
def on_event(self, pad, event):
if event.type == gst.EVENT_NEWSEGMENT:
self.on_new_segment(*event.parse_new_segment())
elif event.type == gst.EVENT_SINK_MESSAGE:
# Handle stream changed messages when they reach our output bin.
# If we listen for it on the bus we get one per tee branch.
msg = event.parse_sink_message()
if msg.structure.has_name('playbin2-stream-changed'):
self.on_stream_changed(msg.structure['uri'])
return True
def on_playbin_state_changed(self, old_state, new_state, pending_state):
gst_logger.debug('Got state-changed message: old=%s new=%s pending=%s',
old_state.value_name, new_state.value_name,
pending_state.value_name)
if new_state == gst.STATE_READY and pending_state == gst.STATE_NULL:
# XXX: We're not called on the last state change when going down to
# NULL, so we rewrite the second to last call to get the expected
# behavior.
new_state = gst.STATE_NULL
pending_state = gst.STATE_VOID_PENDING
if pending_state != gst.STATE_VOID_PENDING:
return # Ignore intermediate state changes
if new_state == gst.STATE_READY:
return # Ignore READY state as it's GStreamer specific
new_state = _GST_STATE_MAPPING[new_state]
old_state, self._audio.state = self._audio.state, new_state
target_state = _GST_STATE_MAPPING[self._audio._target_state]
if target_state == new_state:
target_state = None
logger.debug('Audio event: state_changed(old_state=%s, new_state=%s, '
'target_state=%s)', old_state, new_state, target_state)
AudioListener.send('state_changed', old_state=old_state,
new_state=new_state, target_state=target_state)
if new_state == PlaybackState.STOPPED:
logger.debug('Audio event: stream_changed(uri=None)')
AudioListener.send('stream_changed', uri=None)
if 'GST_DEBUG_DUMP_DOT_DIR' in os.environ:
gst.DEBUG_BIN_TO_DOT_FILE(
self._audio._playbin, gst.DEBUG_GRAPH_SHOW_ALL, 'mopidy')
def on_buffering(self, percent, structure=None):
if structure and structure.has_field('buffering-mode'):
if structure['buffering-mode'] == gst.BUFFERING_LIVE:
return # Live sources stall in paused.
level = logging.getLevelName('TRACE')
if percent < 10 and not self._audio._buffering:
self._audio._playbin.set_state(gst.STATE_PAUSED)
self._audio._buffering = True
level = logging.DEBUG
if percent == 100:
self._audio._buffering = False
if self._audio._target_state == gst.STATE_PLAYING:
self._audio._playbin.set_state(gst.STATE_PLAYING)
level = logging.DEBUG
gst_logger.log(level, 'Got buffering message: percent=%d%%', percent)
def on_end_of_stream(self):
gst_logger.debug('Got end-of-stream message.')
logger.debug('Audio event: reached_end_of_stream()')
self._audio._tags = {}
AudioListener.send('reached_end_of_stream')
def on_error(self, error, debug):
gst_logger.error(str(error).decode('utf-8'))
if debug:
gst_logger.debug(debug.decode('utf-8'))
# TODO: is this needed?
self._audio.stop_playback()
def on_warning(self, error, debug):
gst_logger.warning(str(error).decode('utf-8'))
if debug:
gst_logger.debug(debug.decode('utf-8'))
def on_async_done(self):
gst_logger.debug('Got async-done.')
def on_tag(self, taglist):
tags = utils.convert_taglist(taglist)
self._audio._tags.update(tags)
logger.debug('Audio event: tags_changed(tags=%r)', tags.keys())
AudioListener.send('tags_changed', tags=tags.keys())
def on_missing_plugin(self, msg):
desc = gst.pbutils.missing_plugin_message_get_description(msg)
debug = gst.pbutils.missing_plugin_message_get_installer_detail(msg)
gst_logger.debug('Got missing-plugin message: description:%s', desc)
logger.warning('Could not find a %s to handle media.', desc)
if gst.pbutils.install_plugins_supported():
logger.info('You might be able to fix this by running: '
'gst-installer "%s"', debug)
# TODO: store the missing plugins installer info in a file so we can
# provide a 'mopidy install-missing-plugins' if the system has the
# required helper installed?
def on_new_segment(self, update, rate, format_, start, stop, position):
gst_logger.debug('Got new-segment event: update=%s rate=%s format=%s '
'start=%s stop=%s position=%s', update, rate,
format_.value_name, start, stop, position)
position_ms = position // gst.MSECOND
logger.debug('Audio event: position_changed(position=%s)', position_ms)
AudioListener.send('position_changed', position=position_ms)
def on_stream_changed(self, uri):
gst_logger.debug('Got stream-changed message: uri=%s', uri)
logger.debug('Audio event: stream_changed(uri=%s)', uri)
AudioListener.send('stream_changed', uri=uri)
# TODO: create a player class which replaces the actors internals
class Audio(pykka.ThreadingActor):
"""
Audio output through `GStreamer <http://gstreamer.freedesktop.org/>`_.
"""
#: The GStreamer state mapped to :class:`mopidy.audio.PlaybackState`
state = PlaybackState.STOPPED
#: The software mixing interface :class:`mopidy.audio.actor.SoftwareMixer`
mixer = None
def __init__(self, config, mixer):
super(Audio, self).__init__()
self._config = config
self._target_state = gst.STATE_NULL
self._buffering = False
self._tags = {}
self._playbin = None
self._outputs = None
self._about_to_finish_callback = None
self._handler = _Handler(self)
self._appsrc = _Appsrc()
self._signals = _Signals()
if mixer and self._config['audio']['mixer'] == 'software':
self.mixer = SoftwareMixer(mixer)
def on_start(self):
try:
self._setup_preferences()
self._setup_playbin()
self._setup_outputs()
self._setup_audio_sink()
except gobject.GError as ex:
logger.exception(ex)
process.exit_process()
def on_stop(self):
self._teardown_mixer()
self._teardown_playbin()
def _setup_preferences(self):
# TODO: move out of audio actor?
# Fix for https://github.com/mopidy/mopidy/issues/604
registry = gst.registry_get_default()
jacksink = registry.find_feature(
'jackaudiosink', gst.TYPE_ELEMENT_FACTORY)
if jacksink:
jacksink.set_rank(gst.RANK_SECONDARY)
def _setup_playbin(self):
playbin = gst.element_factory_make('playbin2')
playbin.set_property('flags', 2) # GST_PLAY_FLAG_AUDIO
# TODO: turn into config values...
playbin.set_property('buffer-size', 5 << 20) # 5MB
playbin.set_property('buffer-duration', 5 * gst.SECOND)
self._signals.connect(playbin, 'source-setup', self._on_source_setup)
self._signals.connect(playbin, 'about-to-finish',
self._on_about_to_finish)
self._playbin = playbin
self._handler.setup_message_handling(playbin)
def _teardown_playbin(self):
self._handler.teardown_message_handling()
self._handler.teardown_event_handling()
self._signals.disconnect(self._playbin, 'about-to-finish')
self._signals.disconnect(self._playbin, 'source-setup')
self._playbin.set_state(gst.STATE_NULL)
def _setup_outputs(self):
# We don't want to use outputs for regular testing, so just install
# an unsynced fakesink when someone asks for a 'testoutput'.
if self._config['audio']['output'] == 'testoutput':
self._outputs = gst.element_factory_make('fakesink')
else:
self._outputs = _Outputs()
try:
self._outputs.add_output(self._config['audio']['output'])
except exceptions.AudioException:
process.exit_process() # TODO: move this up the chain
self._handler.setup_event_handling(self._outputs.get_pad('sink'))
def _setup_audio_sink(self):
audio_sink = gst.Bin('audio-sink')
# Queue element to buy us time between the about to finish event and
# the actual switch, i.e. about to switch can block for longer thanks
# to this queue.
# TODO: make the min-max values a setting?
queue = gst.element_factory_make('queue')
queue.set_property('max-size-buffers', 0)
queue.set_property('max-size-bytes', 0)
queue.set_property('max-size-time', 3 * gst.SECOND)
queue.set_property('min-threshold-time', 1 * gst.SECOND)
audio_sink.add(queue)
audio_sink.add(self._outputs)
if self.mixer:
volume = gst.element_factory_make('volume')
audio_sink.add(volume)
queue.link(volume)
volume.link(self._outputs)
self.mixer.setup(volume, self.actor_ref.proxy().mixer)
else:
queue.link(self._outputs)
ghost_pad = gst.GhostPad('sink', queue.get_pad('sink'))
audio_sink.add_pad(ghost_pad)
self._playbin.set_property('audio-sink', audio_sink)
def _teardown_mixer(self):
if self.mixer:
self.mixer.teardown()
def _on_about_to_finish(self, element):
gst_logger.debug('Got about-to-finish event.')
if self._about_to_finish_callback:
logger.debug('Running about to finish callback.')
self._about_to_finish_callback()
def _on_source_setup(self, element, source):
gst_logger.debug('Got source-setup: element=%s', source)
if source.get_factory().get_name() == 'appsrc':
self._appsrc.configure(source)
else:
self._appsrc.reset()
utils.setup_proxy(source, self._config['proxy'])
def set_uri(self, uri):
"""
Set URI of audio to be played.
You *MUST* call :meth:`prepare_change` before calling this method.
:param uri: the URI to play
:type uri: string
"""
# XXX: Hack to workaround issue on Mac OS X where volume level
# does not persist between track changes. mopidy/mopidy#886
if self.mixer is not None:
current_volume = self.mixer.get_volume()
else:
current_volume = None
self._tags = {} # TODO: add test for this somehow
self._playbin.set_property('uri', uri)
if self.mixer is not None and current_volume is not None:
self.mixer.set_volume(current_volume)
def set_appsrc(
self, caps, need_data=None, enough_data=None, seek_data=None):
"""
Switch to using appsrc for getting audio to be played.
You *MUST* call :meth:`prepare_change` before calling this method.
:param caps: GStreamer caps string describing the audio format to
expect
:type caps: string
:param need_data: callback for when appsrc needs data
:type need_data: callable which takes data length hint in ms
:param enough_data: callback for when appsrc has enough data
:type enough_data: callable
:param seek_data: callback for when data from a new position is needed
to continue playback
:type seek_data: callable which takes time position in ms
"""
self._appsrc.prepare(
gst.Caps(bytes(caps)), need_data, enough_data, seek_data)
self._playbin.set_property('uri', 'appsrc://')
def emit_data(self, buffer_):
"""
Call this to deliver raw audio data to be played.
If the buffer is :class:`None`, the end-of-stream token is put on the
playbin. We will get a GStreamer message when the stream playback
reaches the token, and can then do any end-of-stream related tasks.
Note that the URI must be set to ``appsrc://`` for this to work.
Returns :class:`True` if data was delivered.
:param buffer_: buffer to pass to appsrc
:type buffer_: :class:`gst.Buffer` or :class:`None`
:rtype: boolean
"""
return self._appsrc.push(buffer_)
def emit_end_of_stream(self):
"""
Put an end-of-stream token on the playbin. This is typically used in
combination with :meth:`emit_data`.
We will get a GStreamer message when the stream playback reaches the
token, and can then do any end-of-stream related tasks.
.. deprecated:: 1.0
Use :meth:`emit_data` with a :class:`None` buffer instead.
"""
deprecation.warn('audio.emit_end_of_stream')
self._appsrc.push(None)
def set_about_to_finish_callback(self, callback):
"""
Configure audio to use an about-to-finish callback.
This should be used to achieve gapless playback. For this to work the
callback *MUST* call :meth:`set_uri` with the new URI to play and
block until this call has been made. :meth:`prepare_change` is not
needed before :meth:`set_uri` in this one special case.
:param callable callback: Callback to run when we need the next URI.
"""
self._about_to_finish_callback = callback
def get_position(self):
"""
Get position in milliseconds.
:rtype: int
"""
try:
gst_position = self._playbin.query_position(gst.FORMAT_TIME)[0]
return utils.clocktime_to_millisecond(gst_position)
except gst.QueryError:
# TODO: take state into account for this and possibly also return
# None as the unknown value instead of zero?
logger.debug('Position query failed')
return 0
def set_position(self, position):
"""
Set position in milliseconds.
:param position: the position in milliseconds
:type position: int
:rtype: :class:`True` if successful, else :class:`False`
"""
# TODO: double check seek flags in use.
gst_position = utils.millisecond_to_clocktime(position)
result = self._playbin.seek_simple(
gst.Format(gst.FORMAT_TIME), gst.SEEK_FLAG_FLUSH, gst_position)
gst_logger.debug('Sent flushing seek: position=%s', gst_position)
return result
def start_playback(self):
"""
Notify GStreamer that it should start playback.
:rtype: :class:`True` if successful, else :class:`False`
"""
return self._set_state(gst.STATE_PLAYING)
def pause_playback(self):
"""
Notify GStreamer that it should pause playback.
:rtype: :class:`True` if successful, else :class:`False`
"""
return self._set_state(gst.STATE_PAUSED)
def prepare_change(self):
"""
Notify GStreamer that we are about to change state of playback.
This function *MUST* be called before changing URIs or doing
changes like updating data that is being pushed. The reason for this
is that GStreamer will reset all its state when it changes to
:attr:`gst.STATE_READY`.
"""
return self._set_state(gst.STATE_READY)
def stop_playback(self):
"""
Notify GStreamer that it should stop playback.
:rtype: :class:`True` if successful, else :class:`False`
"""
self._buffering = False
return self._set_state(gst.STATE_NULL)
def wait_for_state_change(self):
"""Block until any pending state changes are complete.
Should only be used by tests.
"""
self._playbin.get_state()
def enable_sync_handler(self):
"""Enable manual processing of messages from bus.
Should only be used by tests.
"""
def sync_handler(bus, message):
self._handler.on_message(bus, message)
return gst.BUS_DROP
bus = self._playbin.get_bus()
bus.set_sync_handler(sync_handler)
def _set_state(self, state):
"""
Internal method for setting the raw GStreamer state.
.. digraph:: gst_state_transitions
graph [rankdir="LR"];
node [fontsize=10];
"NULL" -> "READY"
"PAUSED" -> "PLAYING"
"PAUSED" -> "READY"
"PLAYING" -> "PAUSED"
"READY" -> "NULL"
"READY" -> "PAUSED"
:param state: State to set playbin to. One of: `gst.STATE_NULL`,
`gst.STATE_READY`, `gst.STATE_PAUSED` and `gst.STATE_PLAYING`.
:type state: :class:`gst.State`
:rtype: :class:`True` if successful, else :class:`False`
"""
self._target_state = state
result = self._playbin.set_state(state)
gst_logger.debug('State change to %s: result=%s', state.value_name,
result.value_name)
if result == gst.STATE_CHANGE_FAILURE:
logger.warning(
'Setting GStreamer state to %s failed', state.value_name)
return False
# TODO: at this point we could already emit stopped event instead
# of faking it in the message handling when result=OK
return True
# TODO: bake this into setup appsrc perhaps?
def set_metadata(self, track):
"""
Set track metadata for currently playing song.
Only needs to be called by sources such as `appsrc` which do not
already inject tags in playbin, e.g. when using :meth:`emit_data` to
deliver raw audio data to GStreamer.
:param track: the current track
:type track: :class:`mopidy.models.Track`
"""
taglist = gst.TagList()
artists = [a for a in (track.artists or []) if a.name]
# Default to blank data to trick shoutcast into clearing any previous
# values it might have.
taglist[gst.TAG_ARTIST] = ' '
taglist[gst.TAG_TITLE] = ' '
taglist[gst.TAG_ALBUM] = ' '
if artists:
taglist[gst.TAG_ARTIST] = ', '.join([a.name for a in artists])
if track.name:
taglist[gst.TAG_TITLE] = track.name
if track.album and track.album.name:
taglist[gst.TAG_ALBUM] = track.album.name
event = gst.event_new_tag(taglist)
# TODO: check if we get this back on our own bus?
self._playbin.send_event(event)
gst_logger.debug('Sent tag event: track=%s', track.uri)
def get_current_tags(self):
"""
Get the currently playing media's tags.
If no tags have been found, or nothing is playing, this returns an empty
dictionary. For each set of tags we collect, a tags_changed event is
emitted with the keys of the changed tags. After such an event, users may
call this function to get the updated values.
:rtype: {key: [values]} dict for the current media.
"""
# TODO: should this be a (deep) copy? most likely yes
# TODO: should we return None when stopped?
# TODO: support only fetching keys we care about?
return self._tags
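# Hedged usage sketch (not part of mopidy): the appsrc flow documented in
# set_appsrc() and emit_data() above is driven roughly like this. The caps
# string and the buffer source are placeholders, and the proxy is assumed to
# come from Audio.start(...).proxy() as usual for pykka actors.
def _appsrc_usage_sketch(audio_proxy, pcm_buffers):
    caps = 'audio/x-raw-int,rate=44100,channels=2,width=16,depth=16,signed=true'
    audio_proxy.prepare_change().get()
    audio_proxy.set_appsrc(caps).get()
    audio_proxy.start_playback().get()
    for buf in pcm_buffers:            # gst.Buffer instances
        audio_proxy.emit_data(buf).get()
    audio_proxy.emit_data(None).get()  # a None buffer is the end-of-stream token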
|
|
import logging
import urllib
import httplib2
import simplejson
import yaml
import json
import random
import time
from datetime import datetime
from datetime import timedelta
import config
from webapp2_extras.appengine.auth.models import User
from google.appengine.ext import ndb
from google.appengine.api import urlfetch
from lib.utils import generate_token
from lib.github import github
# user model - extends webapp2 User model
class User(User):
uid = ndb.StringProperty()
username = ndb.StringProperty()
email = ndb.StringProperty()
name = ndb.StringProperty()
timezone = ndb.StringProperty()
country = ndb.StringProperty()
company = ndb.StringProperty()
blogger = ndb.BooleanProperty(default=False)
activated = ndb.BooleanProperty(default=False)
created = ndb.DateTimeProperty(auto_now_add=True)
updated = ndb.DateTimeProperty(auto_now=True)
last_login = ndb.DateTimeProperty()
tfsecret = ndb.StringProperty()
tfenabled = ndb.BooleanProperty(default=False)
@classmethod
def get_by_email(cls, email):
return cls.query(cls.email == email).get()
@classmethod
def get_by_uid(cls, uid):
return cls.query(cls.uid == uid).get()
@classmethod
def get_all(cls):
return cls.query().filter().order(-cls.created).fetch()
# image model
class Image(ndb.Model):
name = ndb.StringProperty()
description = ndb.StringProperty()
created = ndb.DateTimeProperty(auto_now_add=True)
updated = ndb.DateTimeProperty(auto_now=True)
url = ndb.StringProperty()
disk_format = ndb.StringProperty()
container_format = ndb.StringProperty()
active = ndb.BooleanProperty(default=False)
dynamic = ndb.BooleanProperty(default=False)
@classmethod
def get_all(cls):
return cls.query().filter().order(cls.created).fetch()
@classmethod
def get_by_name(cls, name):
image_query = cls.query().filter(cls.name == name)
image = image_query.get()
return image
# cloud model
class API(ndb.Model):
created = ndb.DateTimeProperty(auto_now_add=True)
updated = ndb.DateTimeProperty(auto_now=True)
name = ndb.StringProperty()
description = ndb.StringProperty()
owner = ndb.KeyProperty(kind=User)
@classmethod
def get_by_user(cls, user):
api_query = cls.query().filter(cls.owner == user).order(-cls.created)
apis = api_query.fetch()
return apis
@classmethod
def get_by_user_name(cls, user, name):
api_query = cls.query().filter(cls.owner == user, cls.name == name)
apis = api_query.get()
return apis
@classmethod
def create_default(cls, userkey, name="Default"):
api = API()
api.name = name
api.description = "Auto generated default."
api.owner = userkey
api.put()
return api
# project model
class Repo(ndb.Model):
created = ndb.DateTimeProperty(auto_now_add=True)
updated = ndb.DateTimeProperty(auto_now=True)
name = ndb.StringProperty()
description = ndb.StringProperty()
owner = ndb.KeyProperty(kind=User)
public = ndb.BooleanProperty(default=False)
@classmethod
def get_by_user(cls, user):
query = cls.query().filter(cls.owner == user).order(-cls.created)
results = query.fetch()
return results
@classmethod
def get_by_user_name(cls, user, name):
query = cls.query().filter(cls.owner == user, cls.name == name)
result = query.get()
return result
@classmethod
def get_public(cls):
query = cls.query().filter(cls.public == True)
results = query.fetch()
return results
@classmethod
def get_available(cls, user):
query = cls.query().filter(cls.public == True)
results = query.fetch()
query = cls.query().filter(cls.public != True, cls.owner == user)
for result in query.fetch():
results.append(result)
return results
def sync(self):
message = github.repo_sync_contents(self)
return message
# blog posts and pages
class Article(ndb.Model):
created = ndb.DateTimeProperty(auto_now_add=True)
updated = ndb.DateTimeProperty(auto_now=True)
owner = ndb.KeyProperty(kind=User)
title = ndb.StringProperty()
summary = ndb.StringProperty()
filename = ndb.StringProperty()
slug = ndb.StringProperty()
article_type = ndb.StringProperty()
draft = ndb.BooleanProperty(default=True)
@classmethod
def get_all(cls):
article_query = cls.query().filter().order(-cls.created)
articles = article_query.fetch()
return articles
@classmethod
def get_blog_posts(cls, num_articles=1, offset=0):
article_query = cls.query().filter(cls.article_type == 'post', cls.draft == False).order(-cls.created)
articles = article_query.fetch(limit=num_articles, offset=offset)
return articles
@classmethod
def get_by_user(cls, user):
article_query = cls.query().filter(cls.owner == user).order(-Article.created)
articles = article_query.fetch()
return articles
@classmethod
def get_by_type(cls, article_type):
article_query = cls.query().filter(cls.article_type == article_type).order(-Article.created)
articles = article_query.fetch()
return articles
@classmethod
def get_by_slug(cls, slug):
article_query = cls.query().filter(cls.slug == slug)
article = article_query.get()
return article
# log tracking pings
class LogTracking(ndb.Model):
timestamp = ndb.DateTimeProperty(auto_now_add=True)
message = ndb.StringProperty()
ip = ndb.StringProperty()
# log visits
class LogVisit(ndb.Model):
timestamp = ndb.DateTimeProperty(auto_now_add=True)
user = ndb.KeyProperty(kind=User)
message = ndb.StringProperty()
uastring = ndb.StringProperty()
ip = ndb.StringProperty()
# log outgoing emails
class LogEmail(ndb.Model):
sender = ndb.StringProperty(required=True)
to = ndb.StringProperty(required=True)
subject = ndb.StringProperty(required=True)
body = ndb.TextProperty()
when = ndb.DateTimeProperty()
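# Hedged usage sketch (assumes a configured App Engine ndb context; the email
# address is made up): the classmethod helpers above are the intended query
# entry points for these models.
def _models_usage_sketch():
    user = User.get_by_email('someone@example.com')
    posts = Article.get_blog_posts(num_articles=5)
    repos = Repo.get_available(user.key) if user else Repo.get_public()
    return user, posts, repos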
|
|
import sys
import re
import os
import json
RESET = '\033[0m'
def make_std_color(No):
# defined for 1 through 7
return '\033[3' + No + 'm'
def make_color(No):
# defined for 1 through 255
return '\033[38;5;'+ No + 'm'
WRN_COLOR = make_std_color('3')
ERR_COLOR = make_std_color('1')
STD_COLOR = make_color('8')
################################################################################
### @brief length of the swagger definition namespace
################################################################################
defLen = len('#/definitions/')
################################################################################
### @brief facility to remove leading and trailing html-linebreaks
################################################################################
removeTrailingBR = re.compile("<br>$")
removeLeadingBR = re.compile("^<br>")
def brTrim(text):
return removeLeadingBR.sub("", removeTrailingBR.sub("", text.strip(' ')))
swagger = None
fileFilter = None
blockFilter = None
dokuBlocks = [{},{}]
thisVerb = {}
route = ''
verb = ''
def getReference(name, source, verb):
try:
ref = name['$ref'][defLen:]
except Exception as x:
print >>sys.stderr, ERR_COLOR + "No reference in: " + name + RESET
raise
if not ref in swagger['definitions']:
fn = ''
if verb:
fn = swagger['paths'][route][verb]['x-filename']
else:
fn = swagger['definitions'][source]['x-filename']
print >> sys.stderr, STD_COLOR + json.dumps(swagger['definitions'], indent=4, separators=(', ',': '), sort_keys=True) + RESET
raise Exception("invalid reference: " + ref + " in " + fn)
return ref
removeDoubleLF = re.compile("\n\n")
removeLF = re.compile("\n")
def TrimThisParam(text, indent):
text = text.rstrip('\n').lstrip('\n')
text = removeDoubleLF.sub("\n", text)
if (indent > 0):
indent = (indent + 2) # align the text right of the list...
return removeLF.sub("\n" + ' ' * indent, text)
def unwrapPostJson(reference, layer):
global swagger
rc = ''
for param in swagger['definitions'][reference]['properties'].keys():
thisParam = swagger['definitions'][reference]['properties'][param]
required = ('required' in swagger['definitions'][reference] and
param in swagger['definitions'][reference]['required'])
if '$ref' in thisParam:
subStructRef = getReference(thisParam, reference, None)
rc += ' ' * layer + " - **" + param + "**:\n"
rc += unwrapPostJson(subStructRef, layer + 1)
elif thisParam['type'] == 'object':
rc += ' ' * layer + " - **" + param + "**: " + TrimThisParam(brTrim(thisParam['description']), layer) + "\n"
elif swagger['definitions'][reference]['properties'][param]['type'] == 'array':
rc += ' ' * layer + " - **" + param + "**"
trySubStruct = False
if 'type' in thisParam['items']:
rc += " (" + thisParam['items']['type'] + ")"
else:
if len(thisParam['items']) == 0:
rc += " (anonymous json object)"
else:
trySubStruct = True
rc += ": " + TrimThisParam(brTrim(thisParam['description']), layer)
if trySubStruct:
try:
subStructRef = getReference(thisParam['items'], reference, None)
except:
print >>sys.stderr, ERR_COLOR + "while analyzing: " + param + RESET
print >>sys.stderr, WRN_COLOR + str(thisParam) + RESET
rc += "\n" + unwrapPostJson(subStructRef, layer + 1)
else:
rc += ' ' * layer + " - **" + param + "**: " + TrimThisParam(thisParam['description'], layer) + '\n'
return rc
def getRestBodyParam():
rc = "\n**Body Parameters**\n"
addText = ''
for nParam in range(0, len(thisVerb['parameters'])):
if thisVerb['parameters'][nParam]['in'] == 'body':
descOffset = thisVerb['parameters'][nParam]['x-description-offset']
addText = ''
if 'additionalProperties' not in thisVerb['parameters'][nParam]['schema']:
addText = unwrapPostJson(
getReference(thisVerb['parameters'][nParam]['schema'], route, verb),0)
rc += addText
return rc
def getRestDescription():
#print >>sys.stderr, "RESTDESCRIPTION"
if thisVerb['description']:
#print >> sys.stderr, thisVerb['description']
return thisVerb['description']
else:
#print >> sys.stderr, "ELSE"
return ""
def getRestReplyBodyParam(param):
rc = "\n**Response Body**\n"
try:
rc += unwrapPostJson(getReference(thisVerb['responses'][param]['schema'], route, verb), 0)
except Exception:
print >>sys.stderr, ERR_COLOR + "failed to search " + param + " in: " + RESET
print >>sys.stderr, WRN_COLOR + json.dumps(thisVerb, indent=4, separators=(', ',': '), sort_keys=True) + RESET
raise
return rc + "\n"
SIMPL_REPL_DICT = {
"\\" : "\\\\",
"@RESTDESCRIPTION" : getRestDescription,
"@RESTURLPARAMETERS" : "\n**Path Parameters**\n",
"@RESTQUERYPARAMETERS" : "\n**Query Parameters**\n",
"@RESTHEADERPARAMETERS" : "\n**Header Parameters**\n",
"@RESTRETURNCODES" : "\n**Return Codes**\n",
"@PARAMS" : "\n**Parameters**\n",
"@RESTPARAMS" : "",
"@RESTURLPARAMS" : "\n**Path Parameters**\n",
"@RESTQUERYPARAMS" : "\n**Query Parameters**\n",
"@RESTBODYPARAM" : "", #getRestBodyParam,
"@RESTREPLYBODY" : getRestReplyBodyParam,
"@RESTQUERYPARAM" : "@RESTPARAM",
"@RESTURLPARAM" : "@RESTPARAM",
"@PARAM" : "@RESTPARAM",
"@RESTHEADERPARAM" : "@RESTPARAM",
"@EXAMPLES" : "\n**Examples**\n",
"@RESTPARAMETERS" : ""
}
SIMPLE_RX = re.compile(
r'''
\\| # the backslash...
@RESTDESCRIPTION| # -> <empty>
@RESTURLPARAMETERS| # -> \n**Path Parameters**\n
@RESTQUERYPARAMETERS| # -> \n**Query Parameters**\n
@RESTHEADERPARAMETERS| # -> \n**Header Parameters**\n
@RESTBODYPARAM| # empty now, comes with the post body -> call post body param
@RESTRETURNCODES| # -> \n**Return Codes**\n
@PARAMS| # -> \n**Parameters**\n
@RESTPARAMS| # -> <empty>
@RESTURLPARAMS| # -> <empty>
@RESTQUERYPARAMS| # -> <empty>
@PARAM| # -> @RESTPARAM
@RESTURLPARAM| # -> @RESTPARAM
@RESTQUERYPARAM| # -> @RESTPARAM
@RESTHEADERPARAM| # -> @RESTPARAM
@EXAMPLES| # -> \n**Examples**\n
@RESTPARAMETERS| # -> <empty>
@RESTREPLYBODY\{(.*)\} # -> call body function
''', re.X)
def SimpleRepl(match):
m = match.group(0)
# print 'xxxxx [%s]' % m
try:
        n = SIMPL_REPL_DICT[m]
        if n is None:
            raise Exception("failed to find regex while searching for: " + m)
        elif isinstance(n, str):
            return n
        else:
            return n()
except Exception:
pos = m.find('{')
if pos > 0:
newMatch = m[:pos]
param = m[pos + 1 :].rstrip(' }')
try:
                n = SIMPL_REPL_DICT[newMatch]
                if n is None:
                    raise Exception("failed to find regex while searching for: " +
                                    newMatch + " extracted from: " + m)
                elif isinstance(n, str):
                    return n
                else:
                    return n(param)
except Exception as x:
#raise Exception("failed to find regex while searching for: " +
# newMatch + " extracted from: " + m)
raise
else:
raise Exception("failed to find regex while searching for: " + m)
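# SimpleRepl is meant to be used as the substitution callback for SIMPLE_RX,
# i.e. SIMPLE_RX.sub(SimpleRepl, lines) as done in replaceCode() below:
# plain-string entries of SIMPL_REPL_DICT (like "@EXAMPLES") are substituted
# verbatim, callables (like "@RESTDESCRIPTION") are invoked, and tokens of the
# form "@RESTREPLYBODY{200}" are split at the brace so the callable receives
# "200" as its parameter.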
RX = [
(re.compile(r"<!--(\s*.+\s)-->"), ""),
# remove the placeholder BR's again
(re.compile(r"<br />\n"), "\n"),
# multi line bullet lists should become one
(re.compile(r"\n\n-"), "\n-"),
#HTTP API changing code
    # unwrap multi-line briefs (up to 3 lines supported for now ;-)
(re.compile(r"@brief(.+)\n(.+)\n(.+)\n\n"), r"@brief\g<1> \g<2> \g<3>\n\n"),
(re.compile(r"@brief(.+)\n(.+)\n\n"), r"@brief\g<1> \g<2>\n\n"),
# if there is an @brief above a RESTHEADER, swap the sequence
(re.compile(r"@brief(.+\n*)\n@RESTHEADER{([#\s\w\/\_{}-]*),([\s\w-]*)}"), r"###\g<3>\n\g<1>\n\n`\g<2>`"),
# else simply put it into the text
(re.compile(r"@brief(.+)"), r"\g<1>"),
    # there should be no RESTHEADER without a brief, so we deliberately fail by not handling that case
#(re.compile(r"@RESTHEADER{([\s\w\/\_{}-]*),([\s\w-]*)}"), r"###\g<2>\n`\g<1>`"),
# Format error codes from errors.dat
(re.compile(r"#####+\n"), r""),
(re.compile(r"## (.+\n\n)## (.+\n)"), r"## \g<1>\g<2>"),
# (re.compile(r"- (\w+):\s*@LIT{(.+)}"), r"\n*\g<1>* - **\g<2>**:"),
(re.compile(r"(.+),(\d+),\"(.+)\",\"(.+)\""), r'\n* <a name="\g<1>"></a>**\g<2>** - **\g<1>**<br>\n \g<4>'),
(re.compile(r"TODOSWAGGER.*"),r"")
]
# (re.compile(r"@RESTPARAM{([\s\w-]*),([\s\w\_\|-]*),\s*(\w+)}"), r"* *\g<1>*:"),
# (re.compile(r"@RESTRETURNCODE{(.*)}"), r"* *\g<1>*:"),
# (re.compile(r"@RESTBODYPARAMS{(.*)}"), r"*(\g<1>)*"),
RX2 = [
# parameters - extract their type and whether mandatory or not.
(re.compile(r"@RESTPARAM{(\s*[\w\-]*)\s*,\s*([\w\_\|-]*)\s*,\s*(required|optional)}"), r"* *\g<1>* (\g<3>):"),
(re.compile(r"@RESTALLBODYPARAM{(\s*[\w\-]*)\s*,\s*([\w\_\|-]*)\s*,\s*(required|optional)}"), r"\n**Request Body** (\g<3>)\n\n"),
(re.compile(r"@RESTRETURNCODE{(.*)}"), r"* *\g<1>*:")
]
match_RESTHEADER = re.compile(r"@RESTHEADER\{(.*)\}")
match_RESTRETURNCODE = re.compile(r"@RESTRETURNCODE\{(.*)\}")
have_RESTBODYPARAM = re.compile(r"@RESTBODYPARAM|@RESTDESCRIPTION")
have_RESTREPLYBODY = re.compile(r"@RESTREPLYBODY")
have_RESTSTRUCT = re.compile(r"@RESTSTRUCT")
remove_MULTICR = re.compile(r'\n\n\n*')
def _mkdir_recursive(path):
sub_path = os.path.dirname(path)
if not os.path.exists(sub_path):
_mkdir_recursive(sub_path)
if not os.path.exists(path):
os.mkdir(path)
def replaceCode(lines, blockName):
global swagger, thisVerb, route, verb
thisVerb = {}
foundRest = False
# first find the header:
headerMatch = match_RESTHEADER.search(lines)
if headerMatch and headerMatch.lastindex > 0:
foundRest = True
try:
(verb,route) = headerMatch.group(1).split(',')[0].split(' ')
verb = verb.lower()
except:
print >> sys.stderr, ERR_COLOR + "failed to parse header from: " + headerMatch.group(1) + " while analysing " + blockName + RESET
raise
try:
thisVerb = swagger['paths'][route][verb]
except:
print >> sys.stderr, ERR_COLOR + "failed to locate route in the swagger json: [" + verb + " " + route + "]" + " while analysing " + blockName + RESET
print >> sys.stderr, WRN_COLOR + lines + RESET
print >> sys.stderr, "Did you forget to run utils/generateSwagger.sh?"
raise
for (oneRX, repl) in RX:
lines = oneRX.sub(repl, lines)
if foundRest:
rcCode = None
foundRestBodyParam = False
foundRestReplyBodyParam = False
lineR = lines.split('\n')
#print lineR
l = len(lineR)
r = 0
while (r < l):
# remove all but the first RESTBODYPARAM:
if have_RESTBODYPARAM.search(lineR[r]):
if foundRestBodyParam:
lineR[r] = ''
else:
lineR[r] = '@RESTDESCRIPTION'
foundRestBodyParam = True
r+=1
while ((len(lineR[r]) > 0) and
((lineR[r][0] != '@') or
have_RESTBODYPARAM.search(lineR[r]))):
# print "xxx - %d %s" %(len(lineR[r]), lineR[r])
lineR[r] = ''
r+=1
m = match_RESTRETURNCODE.search(lineR[r])
if m and m.lastindex > 0:
rcCode = m.group(1)
# remove all but the first RESTREPLYBODY:
if have_RESTREPLYBODY.search(lineR[r]):
if foundRestReplyBodyParam != rcCode:
lineR[r] = '@RESTREPLYBODY{' + rcCode + '}\n'
else:
lineR[r] = ''
foundRestReplyBodyParam = rcCode
r+=1
while (len(lineR[r]) > 1):
lineR[r] = ''
r+=1
m = match_RESTRETURNCODE.search(lineR[r])
if m and m.lastindex > 0:
rcCode = m.group(1)
# remove all RESTSTRUCTS - they're referenced anyways:
if have_RESTSTRUCT.search(lineR[r]):
while (len(lineR[r]) > 1):
lineR[r] = ''
r+=1
r+=1
lines = "\n".join(lineR)
#print "x" * 70
#print lines
lines = SIMPLE_RX.sub(SimpleRepl, lines)
for (oneRX, repl) in RX2:
lines = oneRX.sub(repl, lines)
lines = remove_MULTICR.sub("\n\n", lines)
#print lines
return lines
def replaceCodeIndex(lines):
lines = re.sub(r"<!--(\s*.+\s)-->","", lines)
#HTTP API changing code
#lines = re.sub(r"@brief(.+)",r"\g<1>", lines)
#lines = re.sub(r"@RESTHEADER{([\s\w\/\_{}-]*),([\s\w-]*)}", r"###\g<2>\n`\g<1>`", lines)
return lines
RXUnEscapeMDInLinks = re.compile("\\\\_")
def setAnchor(param):
unescapedParam = RXUnEscapeMDInLinks.sub("_", param)
return "<a name=\"" + unescapedParam + "\">#</a>"
RXFinal = [
(re.compile(r"@anchor (.*)"), setAnchor),
]
def replaceCodeFullFile(lines):
for (oneRX, repl) in RXFinal:
lines = oneRX.sub(repl, lines)
return lines
################################################################################
# main loop over all files
################################################################################
def walk_on_files(inDirPath, outDirPath):
global fileFilter
count = 0
skipped = 0
for root, dirs, files in os.walk(inDirPath):
for file in files:
if file.endswith(".md") and not file.endswith("SUMMARY.md"):
count += 1
inFileFull = os.path.join(root, file)
outFileFull = os.path.join(outDirPath, inFileFull)
if fileFilter != None:
if fileFilter.match(inFileFull) == None:
skipped += 1
# print "Skipping %s -> %s" % (inFileFull, outFileFull)
                        continue
# print "%s -> %s" % (inFileFull, outFileFull)
_mkdir_recursive(os.path.join(outDirPath, root))
findStartCode(inFileFull, outFileFull)
print STD_COLOR + "Processed %d files, skipped %d" % (count, skipped) + RESET
def findStartCode(inFileFull, outFileFull):
inFD = open(inFileFull, "r")
textFile = inFD.read()
inFD.close()
#print "-" * 80
#print textFile
matchInline = re.findall(r'@startDocuBlockInline\s*(\w+)', textFile)
if matchInline:
for find in matchInline:
#print "7"*80
#print inFileFull + " " + find
textFile = replaceTextInline(textFile, inFileFull, find)
#print textFile
match = re.findall(r'@startDocuBlock\s*(\w+)', textFile)
if match:
for find in match:
#print "8"*80
#print find
textFile = replaceText(textFile, inFileFull, find)
#print textFile
try:
textFile = replaceCodeFullFile(textFile)
except:
print >>sys.stderr, ERR_COLOR + "while parsing : " + inFileFull + RESET
raise
#print "9" * 80
#print textFile
outFD = open(outFileFull, "w")
outFD.write(textFile)
outFD.close()
#JSF_put_api_replication_synchronize
def replaceText(text, pathOfFile, searchText):
''' inserts docublocks into md '''
#print '7'*80
global dokuBlocks
if not searchText in dokuBlocks[0]:
print >> sys.stderr, ERR_COLOR + "Failed to locate the docublock '" + searchText + "' for replacing it into the file '" +pathOfFile + "'\n have:" + RESET
        print >> sys.stderr, WRN_COLOR + str(dokuBlocks[0].keys()) + RESET
print >> sys.stderr, ERR_COLOR + '*' * 80 + RESET
print >> sys.stderr, WRN_COLOR + text + RESET
print >> sys.stderr, ERR_COLOR + "Failed to locate the docublock '" + searchText + "' for replacing it into the file '" +pathOfFile + "' For details scroll up!" + RESET
exit(1)
#print '7'*80
#print dokuBlocks[0][searchText]
#print '7'*80
rc= re.sub("@startDocuBlock\s+"+ searchText + "(?:\s+|$)", dokuBlocks[0][searchText], text)
return rc
def replaceTextInline(text, pathOfFile, searchText):
''' inserts docublocks into md '''
global dokuBlocks
if not searchText in dokuBlocks[1]:
print >> sys.stderr, ERR_COLOR + "Failed to locate the inline docublock '" + searchText + "' for replacing it into the file '" + pathOfFile + "'\n have: " + RESET
print >> sys.stderr, "%s%s%s" %(WRN_COLOR, dokuBlocks[1].keys(), RESET)
print >> sys.stderr, ERR_COLOR + '*' * 80 + RESET
print >> sys.stderr, WRN_COLOR + text + RESET
print >> sys.stderr, ERR_COLOR + "Failed to locate the inline docublock '" + searchText + "' for replacing it into the file '" + pathOfFile + "' For details scroll up!" + RESET
exit(1)
rePattern = r'(?s)\s*@startDocuBlockInline\s+'+ searchText +'\s.*?@endDocuBlock\s' + searchText
# (?s) is equivalent to flags=re.DOTALL but works in Python 2.6
match = re.search(rePattern, text)
if (match == None):
print >> sys.stderr, ERR_COLOR + "failed to match with '" + rePattern + "' for " + searchText + " in file " + pathOfFile + " in: \n" + text + RESET
exit(1)
subtext = match.group(0)
if (len(re.findall('@startDocuBlock', subtext)) > 1):
print >> sys.stderr, ERR_COLOR + "failed to snap with '" + rePattern + "' on end docublock for " + searchText + " in " + pathOfFile + " our match is:\n" + subtext + RESET
exit(1)
return re.sub(rePattern, dokuBlocks[1][searchText], text)
################################################################################
# Read the docublocks into memory
################################################################################
thisBlock = ""
thisBlockName = ""
thisBlockType = 0
STATE_SEARCH_START = 0
STATE_SEARCH_END = 1
SEARCH_START = re.compile(r" *start[0-9a-zA-Z]*\s\s*([0-9a-zA-Z_ ]*)\s*$")
def readStartLine(line):
global thisBlockName, thisBlockType, thisBlock, dokuBlocks
if ("@startDocuBlock" in line):
if "@startDocuBlockInline" in line:
thisBlockType = 1
else:
thisBlockType = 0
try:
thisBlockName = SEARCH_START.search(line).group(1).strip()
except:
print >> sys.stderr, ERR_COLOR + "failed to read startDocuBlock: [" + line + "]" + RESET
exit(1)
dokuBlocks[thisBlockType][thisBlockName] = ""
return STATE_SEARCH_END
return STATE_SEARCH_START
def readNextLine(line):
global thisBlockName, thisBlockType, thisBlock, dokuBlocks
if '@endDocuBlock' in line:
return STATE_SEARCH_START
dokuBlocks[thisBlockType][thisBlockName] += line
#print "reading " + thisBlockName
#print dokuBlocks[thisBlockType][thisBlockName]
return STATE_SEARCH_END
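# readStartLine()/readNextLine() implement a small two-state parser over
# allComments.txt. A sketch of the input they expect (hypothetical block name):
#
#   @startDocuBlock JSF_get_api_version
#   @brief returns the server version
#   ... body copied verbatim into dokuBlocks[0]['JSF_get_api_version'] ...
#   @endDocuBlock JSF_get_api_version
#
# Blocks opened with @startDocuBlockInline are stored in dokuBlocks[1] instead.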
def loadDokuBlocks():
state = STATE_SEARCH_START
f=open("allComments.txt", 'rU')
count = 0
for line in f.readlines():
if state == STATE_SEARCH_START:
state = readStartLine(line)
elif state == STATE_SEARCH_END:
state = readNextLine(line)
#if state == STATE_SEARCH_START:
# print dokuBlocks[thisBlockType].keys()
if blockFilter != None:
remainBlocks= {}
print STD_COLOR + "filtering blocks" + RESET
for oneBlock in dokuBlocks[0]:
if blockFilter.match(oneBlock) != None:
print "%sfound block %s%s" % (STD_COLOR, oneBlock, RESET)
#print dokuBlocks[0][oneBlock]
remainBlocks[oneBlock] = dokuBlocks[0][oneBlock]
dokuBlocks[0] = remainBlocks
for oneBlock in dokuBlocks[0]:
try:
#print "processing %s" % oneBlock
dokuBlocks[0][oneBlock] = replaceCode(dokuBlocks[0][oneBlock], oneBlock)
#print "6"*80
#print dokuBlocks[0][oneBlock]
#print "6"*80
except:
print >>sys.stderr, ERR_COLOR + "while parsing :\n" + oneBlock + RESET
raise
for oneBlock in dokuBlocks[1]:
try:
dokuBlocks[1][oneBlock] = replaceCode(dokuBlocks[1][oneBlock], oneBlock)
except:
print >>sys.stderr, WRN_COLOR + "while parsing :\n" + oneBlock + RESET
raise
if __name__ == '__main__':
    if len(sys.argv) < 4:
        print("usage: " + sys.argv[0] + " <input-directory> <output-directory> <swagger.json> [file-filter] [block-filter]")
exit(1)
inDir = sys.argv[1]
outDir = sys.argv[2]
swaggerJson = sys.argv[3]
if len(sys.argv) > 4 and sys.argv[4].strip() != '':
print STD_COLOR + "filtering " + sys.argv[4] + RESET
fileFilter = re.compile(sys.argv[4])
if len(sys.argv) > 5 and sys.argv[5].strip() != '':
print STD_COLOR + "filtering Docublocks: " + sys.argv[5] + RESET
blockFilter = re.compile(sys.argv[5])
f=open(swaggerJson, 'rU')
swagger= json.load(f)
f.close()
loadDokuBlocks()
print "%sloaded %d / %d docu blocks%s" % (STD_COLOR, len(dokuBlocks[0]), len(dokuBlocks[1]), RESET)
#print dokuBlocks[0].keys()
walk_on_files(inDir, outDir)
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2006 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
from abc import ABCMeta
from BaseHTTPServer import BaseHTTPRequestHandler
from Cookie import CookieError, BaseCookie, SimpleCookie
import cgi
from datetime import datetime
import errno
from hashlib import md5
import new
import mimetypes
import os
import re
import socket
from StringIO import StringIO
import sys
import urlparse
from genshi.builder import Fragment
from trac.core import Interface, TracBaseError
from trac.util import get_last_traceback, lazy, unquote
from trac.util.datefmt import http_date, localtz
from trac.util.text import empty, exception_to_unicode, to_unicode
from trac.util.translation import _
from trac.web.href import Href
from trac.web.wsgi import _FileWrapper
class IAuthenticator(Interface):
"""Extension point interface for components that can provide the name
of the remote user."""
def authenticate(req):
"""Return the name of the remote user, or `None` if the identity of the
user is unknown."""
class IRequestHandler(Interface):
"""Decide which `trac.core.Component` handles which `Request`, and how.
    The boolean property `is_valid_default_handler` determines whether the
    `IRequestHandler` can be used as a `default_handler` and defaults to
    `True`. To be suitable as a `default_handler`, an `IRequestHandler` must
return an HTML document and `data` dictionary for rendering the document,
and must not require that `match_request` be called prior to
`process_request`.
The boolean property `jquery_noconflict` determines whether jQuery's
`noConflict` mode will be activated by the handler, and defaults to
`False`.
"""
def match_request(req):
"""Return whether the handler wants to process the given request."""
def process_request(req):
"""Process the request.
Return a `(template_name, data, content_type)` tuple,
where `data` is a dictionary of substitutions for the Genshi template.
"text/html" is assumed if `content_type` is `None`.
Note that if template processing should not occur, this method can
simply send the response itself and not return anything.
:Since 1.0: Clearsilver templates are no longer supported.
:Since 1.1.2: the rendering `method` (xml, xhtml or text) may be
returned as a fourth parameter in the tuple, but if not specified
it will be inferred from the `content_type` when rendering the
template.
"""
def is_valid_default_handler(handler):
"""Returns `True` if the `handler` is a valid default handler, as
described in the `IRequestHandler` interface documentation.
"""
return handler and getattr(handler, 'is_valid_default_handler', True)
class IRequestFilter(Interface):
"""Enable components to interfere with the processing done by the
    main handler, either before and/or after it runs.
"""
def pre_process_request(req, handler):
"""Called after initial handler selection, and can be used to change
the selected handler or redirect request.
Always returns the request handler, even if unchanged.
"""
def post_process_request(req, template, data, content_type, method=None):
"""Do any post-processing the request might need; typically adding
values to the template `data` dictionary, or changing the Genshi
template or mime type.
`data` may be updated in place.
Always returns a tuple of (template, data, content_type), even if
unchanged.
Note that `template`, `data`, `content_type` will be `None` if:
- called when processing an error page
- the default request handler did not return any result
:Since 0.11: there's a `data` argument for supporting Genshi templates;
this introduced a difference in arity which made it possible to
distinguish between the IRequestFilter components still targeted
at ClearSilver templates and the newer ones targeted at Genshi
templates.
:Since 1.0: Clearsilver templates are no longer supported.
:Since 1.1.2: the rendering `method` will be passed if it is returned
by the request handler, otherwise `method` will be `None`. For
backward compatibility, the parameter is optional in the
implementation's signature.
"""
class ITemplateStreamFilter(Interface):
"""Transform the generated content by filtering the Genshi event stream
generated by the template, prior to its serialization.
"""
def filter_stream(req, method, filename, stream, data):
"""Return a filtered Genshi event stream, or the original unfiltered
stream if no match.
`req` is the current request object, `method` is the Genshi render
method (xml, xhtml or text), `filename` is the filename of the template
to be rendered, `stream` is the event stream and `data` is the data for
the current template.
See the Genshi_ documentation for more information.
.. _Genshi: http://genshi.edgewall.org/wiki/Documentation/filters.html
"""
HTTP_STATUS = dict([(code, reason.title()) for code, (reason, description)
in BaseHTTPRequestHandler.responses.items()])
class HTTPException(TracBaseError):
__metaclass__ = ABCMeta
def __init__(self, detail, *args):
"""Factory for HTTPException classes."""
if isinstance(detail, TracBaseError):
self.detail = detail.message
self.reason = detail.title
else:
self.detail = detail
if args:
self.detail = self.detail % args
super(HTTPException, self).__init__('%s %s (%s)' % (self.code,
self.reason,
self.detail))
@property
def message(self):
# The message is based on the e.detail, which can be an Exception
# object, but not a TracError one: when creating HTTPException,
# a TracError.message is directly assigned to e.detail
if isinstance(self.detail, Exception): # not a TracBaseError
message = exception_to_unicode(self.detail)
elif isinstance(self.detail, Fragment): # TracBaseError markup
message = self.detail
else:
message = to_unicode(self.detail)
return message
@property
def title(self):
try:
# We first try to get localized error messages here, but we
# should ignore secondary errors if the main error was also
# due to i18n issues
title = _("Error")
if self.reason:
if title.lower() in self.reason.lower():
title = self.reason
else:
title = _("Error: %(message)s", message=self.reason)
except Exception:
title = "Error"
return title
@classmethod
def subclass(cls, name, code):
"""Create a new Exception class representing a HTTP status code."""
reason = HTTP_STATUS.get(code, 'Unknown')
new_class = new.classobj(name, (HTTPException,), {
'__doc__': 'Exception for HTTP %d %s' % (code, reason)
})
new_class.code = code
new_class.reason = reason
return new_class
_HTTPException_subclass_names = []
for code in [code for code in HTTP_STATUS if code >= 400]:
exc_name = HTTP_STATUS[code].replace(' ', '').replace('-', '')
# 2.5 compatibility hack:
if exc_name == 'InternalServerError':
exc_name = 'InternalError'
if exc_name.lower().startswith('http'):
exc_name = exc_name[4:]
exc_name = 'HTTP' + exc_name
setattr(sys.modules[__name__], exc_name,
HTTPException.subclass(exc_name, code))
_HTTPException_subclass_names.append(exc_name)
del code, exc_name
class _FieldStorage(cgi.FieldStorage):
"""Our own version of cgi.FieldStorage, with tweaks."""
def read_multi(self, *args, **kwargs):
try:
cgi.FieldStorage.read_multi(self, *args, **kwargs)
except ValueError:
# Most likely "Invalid boundary in multipart form",
# possibly an upload of a .mht file? See #9880.
self.read_single()
class _RequestArgs(dict):
"""Dictionary subclass that provides convenient access to request
parameters that may contain multiple values."""
def getfirst(self, name, default=None):
"""Return the first value for the specified parameter, or `default` if
the parameter was not provided.
"""
if name not in self:
return default
val = self[name]
if isinstance(val, list):
val = val[0]
return val
def getlist(self, name):
"""Return a list of values for the specified parameter, even if only
one value was provided.
"""
if name not in self:
return []
val = self[name]
if not isinstance(val, list):
val = [val]
return val
def parse_arg_list(query_string):
"""Parse a query string into a list of `(name, value)` tuples.
:Since 1.1.2: a leading `?` is stripped from `query_string`."""
args = []
if not query_string:
return args
query_string = query_string.lstrip('?')
for arg in query_string.split('&'):
nv = arg.split('=', 1)
if len(nv) == 2:
(name, value) = nv
else:
(name, value) = (nv[0], empty)
name = unquote(name.replace('+', ' '))
if isinstance(name, str):
name = unicode(name, 'utf-8')
value = unquote(value.replace('+', ' '))
if isinstance(value, str):
value = unicode(value, 'utf-8')
args.append((name, value))
return args
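# For example (names and values are decoded to `unicode`, '+' becomes a space
# and percent-escapes are unquoted; a name given without '=' gets an empty
# value):
#
#   parse_arg_list('?action=edit&text=hello+world')
#   # -> [(u'action', u'edit'), (u'text', u'hello world')]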
def arg_list_to_args(arg_list):
"""Convert a list of `(name, value)` tuples into into a `_RequestArgs`."""
args = _RequestArgs()
for name, value in arg_list:
if name in args:
if isinstance(args[name], list):
args[name].append(value)
else:
args[name] = [args[name], value]
else:
args[name] = value
return args
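# Repeated names are folded into lists, e.g. (hypothetical values):
#
#   args = arg_list_to_args([(u'id', u'1'), (u'id', u'2'), (u'page', u'3')])
#   args.getfirst('id')      # -> u'1'
#   args.getlist('id')       # -> [u'1', u'2']
#   args.getlist('missing')  # -> []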
class RequestDone(TracBaseError):
"""Marker exception that indicates whether request processing has completed
and a response was sent.
"""
iterable = None
def __init__(self, iterable=None):
self.iterable = iterable
class Cookie(SimpleCookie):
def load(self, rawdata, ignore_parse_errors=False):
if ignore_parse_errors:
self.bad_cookies = []
self._BaseCookie__set = self._loose_set
SimpleCookie.load(self, rawdata)
if ignore_parse_errors:
self._BaseCookie__set = self._strict_set
for key in self.bad_cookies:
del self[key]
_strict_set = BaseCookie._BaseCookie__set
def _loose_set(self, key, real_value, coded_value):
        # If a key appears multiple times, the first occurrence has the
        # narrowest scope; keep that one
if key in self:
return
try:
self._strict_set(key, real_value, coded_value)
except CookieError:
self.bad_cookies.append(key)
dict.__setitem__(self, key, None)
class Request(object):
"""Represents a HTTP request/response pair.
This class provides a convenience API over WSGI.
"""
def __init__(self, environ, start_response):
"""Create the request wrapper.
:param environ: The WSGI environment dict
:param start_response: The WSGI callback for starting the response
        The `callbacks` dictionary set up here maps attribute names to
            functions that are used to lazily evaluate attribute lookups
"""
self.environ = environ
self._start_response = start_response
self._write = None
self._status = '200 OK'
self._response = None
self._outheaders = []
self._outcharset = None
self.outcookie = Cookie()
self.callbacks = {
'arg_list': Request._parse_arg_list,
'args': lambda req: arg_list_to_args(req.arg_list),
'languages': Request._parse_languages,
'incookie': Request._parse_cookies,
'_inheaders': Request._parse_headers
}
self.redirect_listeners = []
self.base_url = self.environ.get('trac.base_url')
if not self.base_url:
self.base_url = self._reconstruct_url()
self.href = Href(self.base_path)
self.abs_href = Href(self.base_url)
def __getattr__(self, name):
"""Performs lazy attribute lookup by delegating to the functions in the
callbacks dictionary."""
if name in self.callbacks:
value = self.callbacks[name](self)
setattr(self, name, value)
return value
raise AttributeError(name)
def __repr__(self):
uri = self.environ.get('PATH_INFO', '')
qs = self.query_string
if qs:
uri += '?' + qs
return '<%s "%s %r">' % (self.__class__.__name__, self.method, uri)
# Public API
@lazy
def is_xhr(self):
"""Returns `True` if the request is an `XMLHttpRequest`.
:since: 1.1.6
"""
return self.get_header('X-Requested-With') == 'XMLHttpRequest'
@property
def method(self):
"""The HTTP method of the request"""
return self.environ['REQUEST_METHOD']
@property
def path_info(self):
"""Path inside the application"""
path_info = self.environ.get('PATH_INFO', '')
try:
return unicode(path_info, 'utf-8')
except UnicodeDecodeError:
raise HTTPNotFound(_("Invalid URL encoding (was %(path_info)r)",
path_info=path_info))
@property
def query_string(self):
"""Query part of the request"""
return self.environ.get('QUERY_STRING', '')
@property
def remote_addr(self):
"""IP address of the remote user"""
return self.environ.get('REMOTE_ADDR')
@property
def remote_user(self):
""" Name of the remote user.
Will be `None` if the user has not logged in using HTTP authentication.
"""
user = self.environ.get('REMOTE_USER')
if user is not None:
return to_unicode(user)
@property
def scheme(self):
"""The scheme of the request URL"""
return self.environ['wsgi.url_scheme']
@property
def base_path(self):
"""The root path of the application"""
return self.environ.get('SCRIPT_NAME', '')
@property
def server_name(self):
"""Name of the server"""
return self.environ['SERVER_NAME']
@property
def server_port(self):
"""Port number the server is bound to"""
return int(self.environ['SERVER_PORT'])
def add_redirect_listener(self, listener):
"""Add a callable to be called prior to executing a redirect.
The callable is passed the arguments to the `redirect()` call.
"""
self.redirect_listeners.append(listener)
def get_header(self, name):
"""Return the value of the specified HTTP header, or `None` if there's
no such header in the request.
"""
name = name.lower()
for key, value in self._inheaders:
if key == name:
return value
return None
def send_response(self, code=200):
"""Set the status code of the response."""
self._status = '%s %s' % (code, HTTP_STATUS.get(code, 'Unknown'))
def send_header(self, name, value):
"""Send the response header with the specified name and value.
        `value` must either be a `unicode` string or be convertible to one
        (e.g. numbers, ...)
"""
lower_name = name.lower()
if lower_name == 'content-type':
ctpos = value.find('charset=')
if ctpos >= 0:
self._outcharset = value[ctpos + 8:].strip()
elif lower_name == 'content-length':
self._content_length = int(value)
self._outheaders.append((name, unicode(value).encode('utf-8')))
def end_headers(self):
"""Must be called after all headers have been sent and before the
actual content is written.
"""
self._send_cookie_headers()
self._write = self._start_response(self._status, self._outheaders)
def check_modified(self, datetime, extra=''):
"""Check the request "If-None-Match" header against an entity tag.
The entity tag is generated from the specified last modified time
(`datetime`), optionally appending an `extra` string to
indicate variants of the requested resource.
That `extra` parameter can also be a list, in which case the MD5 sum
of the list content will be used.
If the generated tag matches the "If-None-Match" header of the request,
this method sends a "304 Not Modified" response to the client.
Otherwise, it adds the entity tag as an "ETag" header to the response
so that consecutive requests can be cached.
"""
if isinstance(extra, list):
m = md5()
for elt in extra:
m.update(repr(elt))
extra = m.hexdigest()
etag = 'W/"%s/%s/%s"' % (self.authname, http_date(datetime), extra)
inm = self.get_header('If-None-Match')
if not inm or inm != etag:
self.send_header('ETag', etag)
else:
self.send_response(304)
self.send_header('Content-Length', 0)
self.end_headers()
raise RequestDone
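    # A typical call-site sketch (hypothetical attribute names): generate the
    # entity tag from the last modification time plus whatever distinguishes
    # this variant of the resource, e.g.
    #
    #   req.check_modified(page.time, [page.author, page.version])
    #
    # If the client already holds a matching entity tag, the method replies
    # with "304 Not Modified" and raises RequestDone, so no body is rendered.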
_trident_re = re.compile(r' Trident/([0-9]+)')
def redirect(self, url, permanent=False):
"""Send a redirect to the client, forwarding to the specified URL.
The `url` may be relative or absolute, relative URLs will be translated
appropriately.
"""
for listener in self.redirect_listeners:
listener(self, url, permanent)
if permanent:
status = 301 # 'Moved Permanently'
elif self.method == 'POST':
status = 303 # 'See Other' -- safe to use in response to a POST
else:
status = 302 # 'Found' -- normal temporary redirect
self.send_response(status)
if not url.startswith(('http://', 'https://')):
# Make sure the URL is absolute
scheme, host = urlparse.urlparse(self.base_url)[:2]
url = urlparse.urlunparse((scheme, host, url, None, None, None))
# Workaround #10382, IE6-IE9 bug when post and redirect with hash
if status == 303 and '#' in url:
user_agent = self.environ.get('HTTP_USER_AGENT', '')
match_trident = self._trident_re.search(user_agent)
if ' MSIE ' in user_agent and \
(not match_trident or int(match_trident.group(1)) < 6):
url = url.replace('#', '#__msie303:')
self.send_header('Location', url)
self.send_header('Content-Type', 'text/plain')
self.send_header('Content-Length', 0)
self.send_header('Pragma', 'no-cache')
self.send_header('Cache-Control', 'no-cache')
self.send_header('Expires', 'Fri, 01 Jan 1999 00:00:00 GMT')
self.end_headers()
raise RequestDone
def send(self, content, content_type='text/html', status=200):
self.send_response(status)
self.send_header('Cache-Control', 'must-revalidate')
self.send_header('Expires', 'Fri, 01 Jan 1999 00:00:00 GMT')
self.send_header('Content-Type', content_type + ';charset=utf-8')
if isinstance(content, basestring):
self.send_header('Content-Length', len(content))
self.end_headers()
if self.method != 'HEAD':
self.write(content)
raise RequestDone
def send_error(self, exc_info, template='error.html',
content_type='text/html', status=500, env=None, data={}):
try:
if template.endswith('.html'):
if env:
from trac.web.chrome import Chrome, add_stylesheet
add_stylesheet(self, 'common/css/code.css')
try:
data = Chrome(env).render_template(self, template,
data, 'text/html')
except Exception:
# second chance rendering, in "safe" mode
data['trac_error_rendering'] = True
data = Chrome(env).render_template(self, template,
data, 'text/html')
else:
content_type = 'text/plain'
data = '%s\n\n%s: %s' % (data.get('title'),
data.get('type'),
data.get('message'))
except Exception: # failed to render
data = get_last_traceback()
content_type = 'text/plain'
if isinstance(data, unicode):
data = data.encode('utf-8')
self.send_response(status)
self._outheaders = []
self.send_header('Cache-Control', 'must-revalidate')
self.send_header('Expires', 'Fri, 01 Jan 1999 00:00:00 GMT')
self.send_header('Content-Type', content_type + ';charset=utf-8')
self.send_header('Content-Length', len(data))
self._send_cookie_headers()
self._write = self._start_response(self._status, self._outheaders,
exc_info)
if self.method != 'HEAD':
self.write(data)
raise RequestDone
def send_no_content(self):
self.send_response(204)
self.send_header('Content-Length', 0)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
raise RequestDone
def send_file(self, path, mimetype=None):
"""Send a local file to the browser.
This method includes the "Last-Modified", "Content-Type" and
"Content-Length" headers in the response, corresponding to the file
attributes. It also checks the last modification time of the local file
against the "If-Modified-Since" provided by the user agent, and sends a
"304 Not Modified" response if it matches.
"""
if not os.path.isfile(path):
raise HTTPNotFound(_("File %(path)s not found", path=path))
stat = os.stat(path)
mtime = datetime.fromtimestamp(stat.st_mtime, localtz)
last_modified = http_date(mtime)
if last_modified == self.get_header('If-Modified-Since'):
self.send_response(304)
self.send_header('Content-Length', 0)
self.end_headers()
raise RequestDone
if not mimetype:
mimetype = mimetypes.guess_type(path)[0] or \
'application/octet-stream'
self.send_response(200)
self.send_header('Content-Type', mimetype)
self.send_header('Content-Length', stat.st_size)
self.send_header('Last-Modified', last_modified)
use_xsendfile = getattr(self, 'use_xsendfile', False)
if use_xsendfile:
xsendfile_header = getattr(self, 'xsendfile_header', None)
if xsendfile_header:
self.send_header(xsendfile_header, os.path.abspath(path))
else:
use_xsendfile = False
self.end_headers()
if not use_xsendfile and self.method != 'HEAD':
fileobj = open(path, 'rb')
file_wrapper = self.environ.get('wsgi.file_wrapper', _FileWrapper)
self._response = file_wrapper(fileobj, 4096)
raise RequestDone
def read(self, size=None):
"""Read the specified number of bytes from the request body."""
fileobj = self.environ['wsgi.input']
if size is None:
size = self.get_header('Content-Length')
if size is None:
size = -1
else:
size = int(size)
data = fileobj.read(size)
return data
CHUNK_SIZE = 4096
def write(self, data):
"""Write the given data to the response body.
*data* **must** be a `str` string or an iterable instance
which iterates `str` strings, encoded with the charset which
has been specified in the ``'Content-Type'`` header or UTF-8
otherwise.
Note that when the ``'Content-Length'`` header is specified,
its value either corresponds to the length of *data*, or, if
there are multiple calls to `write`, to the cumulative length
of the *data* arguments.
"""
if not self._write:
self.end_headers()
try:
chunk_size = self.CHUNK_SIZE
bufsize = 0
buf = []
buf_append = buf.append
if isinstance(data, basestring):
data = [data]
for chunk in data:
if isinstance(chunk, unicode):
raise ValueError("Can't send unicode content")
if not chunk:
continue
bufsize += len(chunk)
buf_append(chunk)
if bufsize >= chunk_size:
self._write(''.join(buf))
bufsize = 0
buf[:] = ()
if bufsize > 0:
self._write(''.join(buf))
except (IOError, socket.error) as e:
if e.args[0] in (errno.EPIPE, errno.ECONNRESET, 10053, 10054):
raise RequestDone
# Note that mod_wsgi raises an IOError with only a message
# if the client disconnects
if 'mod_wsgi.version' in self.environ and \
e.args[0] in ('failed to write data',
'client connection closed'):
raise RequestDone
raise
# Internal methods
def _parse_arg_list(self):
"""Parse the supplied request parameters into a list of
`(name, value)` tuples.
"""
fp = self.environ['wsgi.input']
# Avoid letting cgi.FieldStorage consume the input stream when the
# request does not contain form data
ctype = self.get_header('Content-Type')
if ctype:
ctype, options = cgi.parse_header(ctype)
if ctype not in ('application/x-www-form-urlencoded',
'multipart/form-data'):
fp = StringIO('')
# Python 2.6 introduced a backwards incompatible change for
# FieldStorage where QUERY_STRING is no longer ignored for POST
# requests. We'll keep the pre 2.6 behaviour for now...
if self.method == 'POST':
qs_on_post = self.environ.pop('QUERY_STRING', '')
fs = _FieldStorage(fp, environ=self.environ, keep_blank_values=True)
if self.method == 'POST':
self.environ['QUERY_STRING'] = qs_on_post
args = []
for value in fs.list or ():
try:
name = unicode(value.name, 'utf-8')
if not value.filename:
value = unicode(value.value, 'utf-8')
except UnicodeDecodeError as e:
raise HTTPBadRequest(
_("Invalid encoding in form data: %(msg)s",
msg=exception_to_unicode(e)))
args.append((name, value))
return args
def _parse_cookies(self):
cookies = Cookie()
header = self.get_header('Cookie')
if header:
cookies.load(header, ignore_parse_errors=True)
return cookies
def _parse_headers(self):
headers = [(name[5:].replace('_', '-').lower(), value)
for name, value in self.environ.items()
if name.startswith('HTTP_')]
if 'CONTENT_LENGTH' in self.environ:
headers.append(('content-length', self.environ['CONTENT_LENGTH']))
if 'CONTENT_TYPE' in self.environ:
headers.append(('content-type', self.environ['CONTENT_TYPE']))
return headers
def _parse_languages(self):
"""The list of languages preferred by the remote user, taken from the
``Accept-Language`` header.
"""
header = self.get_header('Accept-Language') or 'en-us'
langs = []
for i, lang in enumerate(header.split(',')):
code, params = cgi.parse_header(lang)
q = 1
if 'q' in params:
try:
q = float(params['q'])
except ValueError:
q = 0
langs.append((-q, i, code))
langs.sort()
return [code for q, i, code in langs]
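    # For instance, an ``Accept-Language: da, en-gb;q=0.8, en;q=0.7`` header
    # yields ['da', 'en-gb', 'en']: entries are sorted by decreasing q value,
    # with the original header order breaking ties.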
def _reconstruct_url(self):
"""Reconstruct the absolute base URL of the application."""
host = self.get_header('Host')
if not host:
# Missing host header, so reconstruct the host from the
# server name and port
default_port = {'http': 80, 'https': 443}
if self.server_port and self.server_port != \
default_port[self.scheme]:
host = '%s:%d' % (self.server_name, self.server_port)
else:
host = self.server_name
return urlparse.urlunparse((self.scheme, host, self.base_path, None,
None, None))
def _send_cookie_headers(self):
for name in self.outcookie.keys():
path = self.outcookie[name].get('path')
if path:
path = path.replace(' ', '%20') \
.replace(';', '%3B') \
                           .replace(',', '%2C')
self.outcookie[name]['path'] = path
cookies = to_unicode(self.outcookie.output(header='')).encode('utf-8')
for cookie in cookies.splitlines():
self._outheaders.append(('Set-Cookie', cookie.strip()))
__no_apidoc__ = _HTTPException_subclass_names
|
|
import datetime
from dateutil.relativedelta import relativedelta
import mock
from django import template
from django.test import TestCase
from django.utils.html import strip_tags
from timepiece import utils
from timepiece.templatetags import timepiece_tags as tags
from . import factories
class HumanizeTimeTestCase(TestCase):
def test_seconds(self):
seconds_display = tags.humanize_seconds((5.5 * 3600) + 3)
expected = u'05:30:03'
self.assertEquals(
seconds_display, expected,
"Should return {0}, returned {1}".format(expected, seconds_display)
)
def test_seconds_negative(self):
seconds_display = tags.humanize_seconds((-2.5 * 3600) - 4)
expected = u'-02:30:04'
self.assertTrue(seconds_display.startswith('<span'))
self.assertTrue('negative-time' in seconds_display)
self.assertEquals(
strip_tags(seconds_display), expected,
"Should return {0}, returned {1}".format(expected, seconds_display)
)
def test_seconds_overnight(self):
seconds_display = tags.humanize_seconds((30 * 3600) + 2)
expected = u'30:00:02'
self.assertEquals(
seconds_display, expected,
"Should return {0}, returned {1}".format(expected, seconds_display)
)
def test_seconds_format(self):
seconds_display = tags.humanize_seconds(120, '{minutes:02d}:{minutes}')
expected = u'02:2'
self.assertEquals(
seconds_display, expected,
"Should return {0}, returned {1}".format(expected, seconds_display)
)
def test_seconds_negative_format(self):
seconds_display = tags.humanize_seconds(-120, None, '-{minutes:02d}')
expected = u'-02'
self.assertEquals(
seconds_display, expected,
"Should return {0}, returned {1}".format(expected, seconds_display)
)
def test_hours(self):
hours_display = tags.humanize_hours(7.5)
expected = u'07:30:00'
self.assertEquals(
hours_display, expected,
"Should return {0}, returned {1}".format(expected, hours_display)
)
def test_hours_format(self):
hours_display = tags.humanize_hours(7.1, '{minutes:02d}:{minutes}')
expected = u'06:6'
self.assertEquals(
hours_display, expected,
"Should return {0}, returned {1}".format(expected, hours_display)
)
class DateFiltersTagTestCase(TestCase):
def test_default_options(self):
# default everything we can
# response looks like the right format roughly
retval = tags.date_filters("FORM_ID")
self.assertEqual("FORM_ID", retval['form_id'])
filters = retval['filters']
self.assertIn("Past 12 Months", filters)
self.assertIn("Years", filters)
self.assertIn("Quarters (Calendar Year)", filters)
self.assertEqual(3, len(filters))
self.assertEqual(2, len(retval))
def test_months(self):
# Look more closely at months response
retval = tags.date_filters("FORM_ID", options=('months',))
filter = retval['filters']['Past 12 Months']
self.assertEqual(12, len(filter))
for name, first_date, last_date in filter:
# same month "20xx-mm-dd"
self.assertEqual(first_date[4:7], last_date[4:7])
# same year
self.assertEqual(first_date[:5], last_date[:5])
# starts on the first
self.assertEqual("-01", first_date[-3:])
def test_years(self):
# Look more closely at years response
retval = tags.date_filters("FORM_ID", options=('years',))
filter = retval['filters']['Years']
self.assertEqual(4, len(filter))
for year, first_date, last_date in filter:
# start on jan 1, 20xx "20xx-01-01"
self.assertTrue(first_date.startswith("20") and first_date.endswith("-01-01"))
# end on Dec. 31, 20xx "20xx-12-31"
self.assertTrue(last_date.startswith("20") and last_date.endswith("-12-31"))
# start and end in same year, "20xx-"
self.assertEqual(year, first_date[:4])
self.assertEqual(year, last_date[:4])
def test_quarters(self):
# Look more closely at quarters response
retval = tags.date_filters("FORM_ID", options=('quarters',))
filter = retval['filters']['Quarters (Calendar Year)']
self.assertEqual(8, len(filter))
for name, first_date, last_date in filter:
self.assertTrue(name.startswith("Q"))
# starts on the first "20xx-yy-01"
self.assertEqual("-01", first_date[-3:])
# start in the quarter we claim to
self.assertEqual(name[-4:], first_date[:4])
# start and end in same year
self.assertEqual(first_date[:5], last_date[:5])
def test_no_use_range(self):
# sniff test of turning off use_range
retval = tags.date_filters(
"FORM_ID", options=('years',), use_range=False)
filter = retval['filters']['Years']
for year, first_date, last_date in filter:
# first date is blank
self.assertEqual('', first_date)
class TimeTagTestCase(TestCase):
def test_seconds_to_hours(self):
# basic math
self.assertEqual(0.5, tags.seconds_to_hours(1800))
self.assertEqual(2.0, tags.seconds_to_hours(7200))
# rounding
self.assertEqual(2.0, tags.seconds_to_hours(7201))
def test_week_start(self):
start = tags.week_start(datetime.date(2013, 1, 10))
self.assertEqual(start.date(), datetime.date(2013, 1, 7))
class MiscTagTestCase(TestCase):
def test_get_uninvoiced_hours(self):
# uninvoiced hours are any hours without status 'invoiced' or
# 'not-invoiced' [sic]
class Entry(object):
def __init__(self, status, hours):
self.status = status
self.hours = hours
entries = [
Entry('invoiced', 999),
Entry('not-invoiced', 1),
Entry('other', 37),
Entry('shoes', 12)
]
retval = tags.get_uninvoiced_hours(entries)
self.assertEqual(49, retval)
def test_project_report_url_for_contract(self):
dt = datetime.date(2013, 1, 10)
contract = mock.Mock(start_date=dt, end_date=dt)
project = mock.Mock(id=54)
result = tags._project_report_url_params(contract, project)
expected_url = {
'from_date': '2013-01-10',
'to_date': '2013-01-10',
'billable': 1,
'non_billable': 0,
'paid_leave': 0,
'trunc': 'month',
'projects_1': project.id,
}
self.assertEqual(expected_url, result)
class SumHoursTagTestCase(TestCase):
def setUp(self):
class Entry(object):
def __init__(self, seconds):
self.seconds = seconds
def get_total_seconds(self):
return self.seconds
self.entries = [
Entry(1),
Entry(2.5),
Entry(5)
]
def test_sum_hours(self):
retval = tags.sum_hours(self.entries)
self.assertEqual(8.5, retval)
class ArithmeticTagTestCase(TestCase):
def test_multiply(self):
self.assertEqual(1.0, tags.multiply(1, 1))
self.assertEqual(1.5, tags.multiply(3, 0.5))
# numbers can be strings
self.assertEqual(3.0, tags.multiply("1.5", "2"))
def test_get_max_hours(self):
ctx = {
'project_progress': [
{'worked': 1, 'assigned': 2},
{'worked': 3, 'assigned': 0},
{'worked': 2, 'assigned': 1},
]
}
self.assertEqual(3, tags.get_max_hours(ctx))
def test_get_max_hours_min_is_zero(self):
# min of max hours is zero
ctx = {
'project_progress': [
{'worked': -1, 'assigned': -4},
{'worked': -3, 'assigned': -5},
]
}
self.assertEqual(0, tags.get_max_hours(ctx))
class TestProjectHoursForContract(TestCase):
def setUp(self):
self.user = factories.User()
self.a_project = factories.NonbillableProject()
self.another_project = factories.NonbillableProject()
self.billable_project = factories.BillableProject()
self.project_without_hours = factories.NonbillableProject()
projects = [
self.a_project,
self.another_project,
self.billable_project,
self.project_without_hours,
]
self.contract = factories.ProjectContract(projects=projects)
activity = factories.Activity(billable=True)
unbillable_activity = factories.Activity(billable=False)
start_time = datetime.datetime.now()
factories.Entry(
project=self.a_project, activity=activity, start_time=start_time,
end_time=start_time + relativedelta(hours=1))
factories.Entry(
project=self.a_project, activity=unbillable_activity,
start_time=start_time, end_time=start_time + relativedelta(hours=16))
factories.Entry(
project=self.another_project, activity=activity,
start_time=start_time, end_time=start_time + relativedelta(hours=2))
factories.Entry(
project=self.billable_project, activity=activity,
start_time=start_time, end_time=start_time + relativedelta(hours=4))
factories.Entry(
project=self.billable_project, activity=unbillable_activity,
start_time=start_time, end_time=start_time + relativedelta(hours=8))
def test_project_hours_for_contract(self):
retval = tags.project_hours_for_contract(self.contract, self.a_project)
# Includes billable and nonbillable by default
self.assertEqual(17, retval)
def test_project_hours_for_contract_none(self):
# Try it with the aggregate returning None
retval = tags.project_hours_for_contract(
self.contract, self.project_without_hours)
self.assertEqual(0, retval)
def test_project_hours_for_contract_billable(self):
# only include billable hours
retval = tags.project_hours_for_contract(
self.contract, self.billable_project, 'billable')
self.assertEqual(4, retval)
def test_project_hours_for_contract_nonbillable(self):
# only include non-billable hours
retval = tags.project_hours_for_contract(
self.contract, self.billable_project, 'nonbillable')
self.assertEqual(8, retval)
def test_project_hours_for_contract_badbillable(self):
# template tag does syntax check on the 'billable' arg
with self.assertRaises(template.TemplateSyntaxError):
tags.project_hours_for_contract(
self.contract, self.a_project, 'invalidarg')
class AddParametersTest(TestCase):
def test_new_parameters(self):
"""Tag should add parameters to base URL after a '?'."""
url = '/hello/'
params = {'foo': 'bar'}
retval = tags.add_parameters(url, params)
self.assertEqual(retval, url + '?foo=bar')
def test_additional_parameters(self):
"""Tag should add parameters to base URL after a '&'."""
url = '/hello/?user=1'
params = {'foo': 'bar'}
retval = tags.add_parameters(url, params)
self.assertEqual(retval, url + '&foo=bar')
def test_repeat_parameters(self):
"""Tag should append param even if another value exists for it."""
url = '/hello/?foo=bar'
params = {'foo': 'bar'}
retval = tags.add_parameters(url, params)
self.assertEqual(retval, url + '&foo=bar')
def test_no_parameters(self):
"""Tag should return base URL when no parameters are given."""
url = '/hello/'
params = {}
retval = tags.add_parameters(url, params)
self.assertEqual(retval, url)
def test_special_chars(self):
"""Tag should escape HTML entities."""
url = '/hello/'
params = {'foo': '?'}
retval = tags.add_parameters(url, params)
self.assertEqual(retval, url + '?foo=%3F')
class CreateDictTest(TestCase):
def test_create_dict(self):
retVal = tags.create_dict(foo='bar', a='b')
self.assertEquals(len(retVal), 2)
self.assertEquals(retVal['foo'], 'bar')
self.assertEquals(retVal['a'], 'b')
def test_create_empty_dict(self):
retVal = tags.create_dict()
self.assertEquals(retVal, {})
class AddTimezoneTest(TestCase):
def test_add_timezone(self):
d = datetime.datetime.now()
retVal = tags.add_timezone(d)
self.assertEquals(retVal, utils.add_timezone(d))
|
|
#!/usr/bin/env python
import argparse
import cPickle
import traceback
import logging
import time
import sys
import numpy
import experiments.nmt
from experiments.nmt import\
RNNEncoderDecoder,\
prototype_phrase_state,\
parse_input
from experiments.nmt.numpy_compat import argpartition
from collections import OrderedDict
logger = logging.getLogger(__name__)
class Timer(object):
def __init__(self):
self.total = 0
def start(self):
self.start_time = time.time()
def finish(self):
self.total += time.time() - self.start_time
class BeamSearch(object):
def __init__(self, enc_decs):
self.enc_decs = enc_decs
def compile(self):
num_models = len(self.enc_decs)
self.comp_repr = []
self.comp_init_states = []
self.comp_next_probs = []
self.comp_next_states = []
for i in xrange(num_models):
self.comp_repr.append(self.enc_decs[i].create_representation_computer())
self.comp_init_states.append(self.enc_decs[i].create_initializers())
self.comp_next_probs.append(self.enc_decs[i].create_next_probs_computer())
self.comp_next_states.append(self.enc_decs[i].create_next_states_computer())
def search(self, seq, n_samples, eos_id, unk_id, ignore_unk=False, minlen=1, final=False):
num_models = len(self.enc_decs)
c = []
for i in xrange(num_models):
c.append(self.comp_repr[i](seq)[0])
states = []
for i in xrange(num_models):
states.append(map(lambda x : x[None, :], self.comp_init_states[i](c[i])))
dim = states[0][0].shape[1]
num_levels = len(states[0])
fin_trans = []
fin_costs = []
trans = [[]]
costs = [0.0]
for k in range(3 * len(seq)):
if n_samples == 0:
break
# Compute probabilities of the next words for
# all the elements of the beam.
beam_size = len(trans)
last_words = (numpy.array(map(lambda t : t[-1], trans))
if k > 0
else numpy.zeros(beam_size, dtype="int64"))
#log_probs = (numpy.log(self.comp_next_probs_0(c, k, last_words, *states)[0]) + numpy.log(self.comp_next_probs_1(c, k, last_words, *states)[0]))/2.
log_probs = sum(numpy.log(self.comp_next_probs[i](c[i], k, last_words, *states[i])[0]) for i in xrange(num_models))/num_models
# Adjust log probs according to search restrictions
if ignore_unk:
log_probs[:,unk_id] = -numpy.inf
# TODO: report me in the paper!!!
if k < minlen:
log_probs[:,eos_id] = -numpy.inf
            # Find the best options by calling argpartition on the flattened array
next_costs = numpy.array(costs)[:, None] - log_probs
flat_next_costs = next_costs.flatten()
best_costs_indices = argpartition(
flat_next_costs.flatten(),
n_samples)[:n_samples]
            # Decipher the flattened indices
voc_size = log_probs.shape[1]
trans_indices = best_costs_indices / voc_size
word_indices = best_costs_indices % voc_size
costs = flat_next_costs[best_costs_indices]
# Form a beam for the next iteration
new_trans = [[]] * n_samples
new_costs = numpy.zeros(n_samples)
new_states = []
for i in xrange(num_models):
new_states.append([numpy.zeros((n_samples, dim), dtype="float32") for level
in range(num_levels)])
inputs = numpy.zeros(n_samples, dtype="int64")
for i, (orig_idx, next_word, next_cost) in enumerate(
zip(trans_indices, word_indices, costs)):
new_trans[i] = trans[orig_idx] + [next_word]
new_costs[i] = next_cost
for level in range(num_levels):
for j in xrange(num_models):
new_states[j][level][i] = states[j][level][orig_idx]
inputs[i] = next_word
for i in xrange(num_models):
new_states[i]=self.comp_next_states[i](c[i], k, inputs, *new_states[i])
# Filter the sequences that end with end-of-sequence character
trans = []
costs = []
indices = []
for i in range(n_samples):
if new_trans[i][-1] != eos_id:
trans.append(new_trans[i])
costs.append(new_costs[i])
indices.append(i)
else:
n_samples -= 1
fin_trans.append(new_trans[i])
fin_costs.append(new_costs[i])
for i in xrange(num_models):
states[i]=map(lambda x : x[indices], new_states[i])
# Dirty tricks to obtain any translation
if not len(fin_trans):
if ignore_unk:
logger.warning("Did not manage without UNK")
return self.search(seq, n_samples, eos_id=eos_id, unk_id=unk_id, ignore_unk=False, minlen=minlen, final=final)
elif not final:
logger.warning("No appropriate translations: using larger vocabulary")
raise RuntimeError
else:
logger.warning("No appropriate translation: return empty translation")
fin_trans=[[]]
fin_costs = [0.0]
fin_trans = numpy.array(fin_trans)[numpy.argsort(fin_costs)]
fin_costs = numpy.array(sorted(fin_costs))
return fin_trans, fin_costs
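# How the flattened argpartition indices above map back onto the beam: with a
# target vocabulary of size voc_size, a flat index f selects beam entry
# f / voc_size (integer division) and target word f % voc_size. For example,
# with voc_size = 30000 and f = 61234, the candidate extends hypothesis 2 with
# word id 1234.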
def indices_to_words(i2w, seq):
sen = []
for k in xrange(len(seq)):
if i2w[seq[k]] == '<eol>':
break
sen.append(i2w[seq[k]])
return sen
def sample(lm_model, seq, n_samples, eos_id, unk_id,
sampler=None, beam_search=None,
ignore_unk=False, normalize=False,
normalize_p = 1.0,
alpha=1, verbose=False, final=False, wp=0.):
if beam_search:
sentences = []
trans, costs = beam_search.search(seq, n_samples, eos_id=eos_id, unk_id=unk_id,
ignore_unk=ignore_unk, minlen=len(seq) / 2, final=final)
counts = [len(s) for s in trans]
if normalize:
costs = [co / ((max(cn,1))**normalize_p) + wp * cn for co, cn in zip(costs, counts)]
else:
costs = [co + wp * cn for co, cn in zip(costs, counts)]
for i in range(len(trans)):
sen = indices_to_words(lm_model.word_indxs, trans[i]) # Make sure that indices_to_words has been changed
sentences.append(" ".join(sen))
for i in range(len(costs)):
if verbose:
print "{}: {}".format(costs[i], sentences[i])
return sentences, costs, trans
elif sampler:
raise NotImplementedError
else:
raise Exception("I don't know what to do")
def update_dicts(indices, d, D, C, full):
for word in indices:
if word not in d:
if len(d) == full:
return True
if word not in D: # Also not in C
key, value = C.popitem()
del D[key]
d[word] = 0
D[word] = 0
else: # Also in C as (d UNION C) is D. (d INTERSECTION C) is the empty set.
d[word] = 0
del C[word]
return False
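# update_dicts() maintains the rolling target vocabulary used with
# --less-transfer: `d` collects the words required so far, `D` is the current
# full vocabulary of size `full`, and `C` holds the entries of `D` that may
# still be evicted (D is the disjoint union of d and C). A tiny sketch
# (hypothetical word ids):
#
#   d, D, C = OrderedDict(), OrderedDict(), OrderedDict()
#   for w in range(4):            # pre-fill with 4 "common" words
#       D[w] = 0; C[w] = 0
#   update_dicts([2, 7], d, D, C, 4)
#   # -> False; 2 was already in D and merely moved from C to d, while 7
#   #    evicted one entry of C and was added to both d and D.
#
# The function returns True once `d` already holds `full` words, i.e. the
# vocabulary is exhausted and must be changed.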
def parse_args():
parser = argparse.ArgumentParser(
"Sample (of find with beam-search) translations from a translation model")
parser.add_argument("--state",
required=True, help="State to use")
parser.add_argument("--beam-search",
action="store_true", help="Beam size, turns on beam-search")
parser.add_argument("--beam-size",
type=int, help="Beam size")
parser.add_argument("--ignore-unk",
default=False, action="store_true",
help="Ignore unknown words")
parser.add_argument("--source",
help="File of source sentences")
parser.add_argument("--trans",
help="File to save translations in")
parser.add_argument("--normalize",
action="store_true", default=False,
help="Normalize log-prob with the word count")
parser.add_argument("--normalize-p",
type=float, default=1.0,
help="Controls preference to longer output. Only used if `normalize` is true.")
parser.add_argument("--verbose",
action="store_true", default=False,
help="Be verbose")
parser.add_argument("--topn-file",
type=str,
help="Binarized topn list for each source word (Vocabularies must correspond)")
parser.add_argument("--num-common",
type=int,
help="Number of always used common words (inc. <eos>, UNK) \
(With --less-transfer, total number of words)")
parser.add_argument("--num-ttables",
type=int,
help="Number of target words taken from the T-tables for each input word")
parser.add_argument("--less-transfer",
action="store_true", default=False,
help="Keep the same vocabulary for many sentences. \
--num-common is now the total number of words used. \
No vocabulary expansion in case of failure to translate")
parser.add_argument("--no-reset", action="store_true", default=False,
help="Do not reset the dicts when changing vocabularies")
parser.add_argument("--change-every", type=int, default=100,
help="Change the dicts at each multiple of this number. \
Use -1 to change only if full")
parser.add_argument("--final",
action="store_true", default=False,
help="Do not try to expand the vocabulary if a translation fails \
.ignored with --less-transfer (no expansion)")
parser.add_argument("--n-best", action="store_true", default=False,
help="Write n-best list (of size --beam-size)")
parser.add_argument("--start", type=int, default=0,
help="For n-best, first sentence id")
parser.add_argument("--wp", type=float, default=0.,
help="Word penalty. >0: shorter translations \
<0: longer ones")
parser.add_argument("--models", nargs = '+', required=True,
help="path to the models")
parser.add_argument("--changes",
nargs="?", default="",
help="Changes to state")
return parser.parse_args()
def main():
args = parse_args()
state = prototype_phrase_state()
with open(args.state) as src:
state.update(cPickle.load(src))
state.update(eval("dict({})".format(args.changes)))
logging.basicConfig(level=getattr(logging, state['level']), format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")
if 'rolling_vocab' not in state:
state['rolling_vocab'] = 0
if 'save_algo' not in state:
state['save_algo'] = 0
if 'save_gs' not in state:
state['save_gs'] = 0
if 'save_iter' not in state:
state['save_iter'] = -1
if 'var_src_len' not in state:
state['var_src_len'] = False
with open(args.topn_file, 'rb') as f:
topn = cPickle.load(f) # Load dictionary (source word index : list of target word indices)
if args.less_transfer:
for elt in topn:
topn[elt] = topn[elt][:args.num_ttables] # Take the first args.num_ttables only
else:
for elt in topn:
topn[elt] = set(topn[elt][:args.num_ttables]) # Take the first args.num_ttables only and convert list to set
num_models = len(args.models)
rng = numpy.random.RandomState(state['seed'])
enc_decs = []
lm_models = []
original_W_0_dec_approx_embdr = []
original_W2_dec_deep_softmax = []
original_b_dec_deep_softmax = []
for i in xrange(num_models):
enc_decs.append(RNNEncoderDecoder(state, rng, skip_init=True))
enc_decs[i].build()
lm_models.append(enc_decs[i].create_lm_model())
lm_models[i].load(args.models[i])
original_W_0_dec_approx_embdr.append(lm_models[i].params[lm_models[i].name2pos['W_0_dec_approx_embdr']].get_value())
original_W2_dec_deep_softmax.append(lm_models[i].params[lm_models[i].name2pos['W2_dec_deep_softmax']].get_value())
original_b_dec_deep_softmax.append(lm_models[i].params[lm_models[i].name2pos['b_dec_deep_softmax']].get_value())
# On GPU, this will free memory for the next models
# Additional gains could be made by rolling the source vocab
lm_models[i].params[lm_models[i].name2pos['W_0_dec_approx_embdr']].set_value(numpy.zeros((1,1), dtype=numpy.float32))
lm_models[i].params[lm_models[i].name2pos['W2_dec_deep_softmax']].set_value(numpy.zeros((1,1), dtype=numpy.float32))
lm_models[i].params[lm_models[i].name2pos['b_dec_deep_softmax']].set_value(numpy.zeros((1), dtype=numpy.float32))
indx_word = cPickle.load(open(state['word_indx'],'rb')) #Source w2i
sampler = None
beam_search = None
if args.beam_search:
beam_search = BeamSearch(enc_decs)
beam_search.compile()
else:
raise NotImplementedError
#sampler = enc_dec.create_sampler(many_samples=True)
idict_src = cPickle.load(open(state['indx_word'],'r')) #Source i2w
original_target_i2w = lm_models[0].word_indxs.copy()
# I don't think that we need target_word2index
max_words = len(original_b_dec_deep_softmax[0])
if args.less_transfer:
# Use OrderedDict instead of set for reproducibility
d = OrderedDict() # Up to now
D = OrderedDict() # Full
C = OrderedDict() # Allowed to reject
prev_line = 0
logger.info("%d" % prev_line)
D_dict = OrderedDict()
output = False
for i in xrange(args.num_common):
D[i] = 0
C[i] = 0
null_unk_indices = [state['null_sym_target'],state['unk_sym_target']]
update_dicts(null_unk_indices, d, D, C, args.num_common)
with open(args.source, 'r') as f:
for i, line in enumerate(f):
seqin = line.strip()
seq, parsed_in = parse_input(state, indx_word, seqin, idx2word=idict_src) # seq is the ndarray of indices
indices = []
for elt in seq[:-1]: # Exclude the EOL token
if elt != 1: # Exclude OOV (1 will not be a key of topn)
indices.extend(topn[elt]) # Add topn best unigram translations for each source word
output = update_dicts(indices, d, D, C, args.num_common)
if (i % args.change_every) == 0 and args.change_every > 0 and i > 0:
output = True
if output:
D_dict[prev_line] = D.copy() # Save dictionary for the lines preceding this one
prev_line = i
logger.info("%d" % i)
output = False
d = OrderedDict()
if args.no_reset:
C = D.copy()
else:
D = OrderedDict() # Full
C = OrderedDict() # Allowed to reject
for i in xrange(args.num_common):
D[i] = 0
C[i] = 0
null_unk_indices = [state['null_sym_target'], state['unk_sym_target']]
update_dicts(null_unk_indices, d, D, C, args.num_common)
update_dicts(indices, d, D, C, args.num_common) # Assumes you cannot fill d with only 1 line
D_dict[prev_line] = D.copy()
if args.source and args.trans:
# Actually only beam search is currently supported here
assert beam_search
assert args.beam_size
fsrc = open(args.source, 'r')
ftrans = open(args.trans, 'w')
start_time = time.time()
n_samples = args.beam_size
total_cost = 0.0
logging.debug("Beam size: {}".format(n_samples))
for i, line in enumerate(fsrc):
seqin = line.strip()
seq, parsed_in = parse_input(state, indx_word, seqin, idx2word=idict_src) # seq is the ndarray of indices
# For now, keep all input words in the model.
# In the future, we may want to filter them to save on memory, but this isn't really much of an issue now
if args.verbose:
print "Parsed Input:", parsed_in
if args.less_transfer:
if i in D_dict:
indices = D_dict[i].keys()
eos_id = indices.index(state['null_sym_target']) # Find new eos and unk positions
unk_id = indices.index(state['unk_sym_target'])
for j in xrange(num_models):
lm_models[j].params[lm_models[j].name2pos['W_0_dec_approx_embdr']].set_value(original_W_0_dec_approx_embdr[j][indices])
lm_models[j].params[lm_models[j].name2pos['W2_dec_deep_softmax']].set_value(original_W2_dec_deep_softmax[j][:, indices])
lm_models[j].params[lm_models[j].name2pos['b_dec_deep_softmax']].set_value(original_b_dec_deep_softmax[j][indices])
lm_models[0].word_indxs = dict([(k, original_target_i2w[index]) for k, index in enumerate(indices)]) # target index2word
trans, costs, _ = sample(lm_models[0], seq, n_samples, sampler=sampler,
beam_search=beam_search, ignore_unk=args.ignore_unk, normalize=args.normalize,
normalize_p=args.normalize_p, eos_id=eos_id, unk_id=unk_id, final=True, wp=args.wp)
else:
# Extract the indices you need
indices = set()
for elt in seq[:-1]: # Exclude the EOL token
if elt != 1: # Exclude OOV (1 will not be a key of topn)
indices = indices.union(topn[elt]) # Add topn best unigram translations for each source word
num_common_words = args.num_common
while True:
if num_common_words >= max_words:
final = True
num_common_words = max_words
else:
final = False
if args.final: # No matter the number of words
final = True
indices = indices.union(set(xrange(num_common_words))) # Add common words
indices = list(indices) # Convert back to list for advanced indexing
eos_id = indices.index(state['null_sym_target']) # Find new eos and unk positions
unk_id = indices.index(state['unk_sym_target'])
# Set the target word matrices and biases
for j in xrange(num_models):
lm_models[j].params[lm_models[j].name2pos['W_0_dec_approx_embdr']].set_value(original_W_0_dec_approx_embdr[j][indices])
lm_models[j].params[lm_models[j].name2pos['W2_dec_deep_softmax']].set_value(original_W2_dec_deep_softmax[j][:, indices])
lm_models[j].params[lm_models[j].name2pos['b_dec_deep_softmax']].set_value(original_b_dec_deep_softmax[j][indices])
lm_models[0].word_indxs = dict([(k, original_target_i2w[index]) for k, index in enumerate(indices)]) # target index2word
try:
trans, costs, _ = sample(lm_models[0], seq, n_samples, sampler=sampler,
beam_search=beam_search, ignore_unk=args.ignore_unk, normalize=args.normalize,
normalize_p=args.normalize_p, eos_id=eos_id, unk_id=unk_id, final=final)
break # Breaks only if it succeeded (If final=True, will always succeed)
except RuntimeError:
indices = set(indices)
num_common_words *= 2
if not args.n_best:
best = numpy.argmin(costs)
print >>ftrans, trans[best]
else:
order = numpy.argsort(costs)
best = order[0]
for elt in order:
print >>ftrans, str(i+args.start) + ' ||| ' + trans[elt] + ' ||| ' + str(costs[elt])
if args.verbose:
print "Translation:", trans[best]
total_cost += costs[best]
if (i + 1) % 100 == 0:
ftrans.flush()
logger.debug("Current speed is {} per sentence".
format((time.time() - start_time) / (i + 1)))
print "Total cost of the translations: {}".format(total_cost)
fsrc.close()
ftrans.close()
else:
raise NotImplementedError
if __name__ == "__main__":
main()
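# Example invocation (a sketch only: the script and file names below are
# hypothetical, and the state/model pickles must match the vocabularies used
# to build the --topn-file):
#
#   python sample.py --state search_state.pkl --models search_model.npz \
#       --beam-search --beam-size 12 \
#       --topn-file topn.pkl --num-common 3000 --num-ttables 10 --less-transfer \
#       --source input.src --trans output.trans --normalize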
|
|
from abc import ABC, abstractmethod
from collections import defaultdict
from threading import Event, Lock, Thread
from typing import Dict, Iterable, Iterator, List, Set, Tuple, Type, TypeVar
from eventsourcing.application import (
Application,
NotificationLog,
ProcessEvent,
Section,
)
from eventsourcing.domain import AggregateEvent
from eventsourcing.persistence import (
Mapper,
Notification,
ProcessRecorder,
Tracking,
)
from eventsourcing.utils import get_topic, resolve_topic
class Follower(Application):
"""
Extends the :class:`~eventsourcing.application.Application` class
by using a process recorder as its application recorder, by keeping
track of the applications it is following, and pulling and processing
new domain event notifications through its :func:`policy` method.
"""
def __init__(self) -> None:
super().__init__()
self.readers: Dict[
str,
Tuple[
NotificationLogReader,
Mapper[AggregateEvent],
],
] = {}
self.recorder: ProcessRecorder
def construct_recorder(self) -> ProcessRecorder:
"""
Constructs and returns a :class:`~eventsourcing.persistence.ProcessRecorder`
for the application to use as its application recorder.
"""
return self.factory.process_recorder()
def follow(self, name: str, log: NotificationLog) -> None:
"""
Constructs a notification log reader and a mapper for
the named application, and adds them to its collection
of readers.
"""
assert isinstance(self.recorder, ProcessRecorder)
reader = NotificationLogReader(log)
mapper = self.construct_mapper(name)
self.readers[name] = (reader, mapper)
def pull_and_process(self, name: str) -> None:
"""
Pulls and processes unseen domain event notifications
        from the notification log reader of the named application.
Converts received event notifications to domain
event objects, and then calls the :func:`policy`
with a new :class:`ProcessEvent` object which
contains a :class:`~eventsourcing.persistence.Tracking`
object that keeps track of the name of the application
and the position in its notification log from which the
domain event notification was pulled. The policy will
save aggregates to the process event object, using its
:func:`~ProcessEvent.save` method, which collects pending
domain events using the aggregates'
:func:`~eventsourcing.domain.Aggregate.collect_events`
method, and the process event object will then be recorded
by calling the :func:`record` method.
"""
reader, mapper = self.readers[name]
start = self.recorder.max_tracking_id(name) + 1
for notification in reader.select(start=start):
domain_event = mapper.to_domain_event(notification)
process_event = ProcessEvent(
Tracking(
application_name=name,
notification_id=notification.id,
)
)
self.policy(
domain_event,
process_event,
)
self.record(process_event)
@abstractmethod
def policy(
self,
domain_event: AggregateEvent,
process_event: ProcessEvent,
) -> None:
"""
Abstract domain event processing policy method. Must be
implemented by event processing applications. When
processing the given domain event, event processing
applications must use the :func:`~ProcessEvent.save`
method of the given process event object (instead of
the application's :func:`~eventsourcing.application.Application.save`
method) to collect pending events from changed aggregates,
so that the new domain events will be recorded atomically
with tracking information about the position of the given
domain event's notification.
"""
class Promptable(ABC):
"""
Abstract base class for "promptable" objects.
"""
@abstractmethod
def receive_prompt(self, leader_name: str) -> None:
"""
        Receives the name of a leader that has new domain
        event notifications.
"""
class Leader(Application):
"""
Extends the :class:`~eventsourcing.application.Application`
class by also being responsible for keeping track of
followers, and prompting followers when there are new
domain event notifications to be pulled and processed.
"""
def __init__(self) -> None:
super().__init__()
self.followers: List[Promptable] = []
def lead(self, follower: Promptable) -> None:
"""
Adds given follower to a list of followers.
"""
self.followers.append(follower)
def notify(self, new_events: List[AggregateEvent]) -> None:
"""
Extends the application :func:`~eventsourcing.application.Application.notify`
method by calling :func:`prompt_followers` whenever new events have just
been saved.
"""
super().notify(new_events)
if len(new_events):
self.prompt_followers()
def prompt_followers(self) -> None:
"""
Prompts followers by calling their :func:`~Promptable.receive_prompt`
methods with the name of the application.
"""
name = self.__class__.__name__
for follower in self.followers:
follower.receive_prompt(name)
class ProcessApplication(Leader, Follower, ABC):
"""
Base class for event processing applications
that are both "leaders" and followers".
"""
class System:
"""
Defines a system of applications.
"""
def __init__(
self,
pipes: Iterable[Iterable[Type[Application]]],
):
nodes: Dict[str, Type[Application]] = {}
edges: Set[Tuple[str, str]] = set()
# Build nodes and edges.
for pipe in pipes:
follower_cls = None
for cls in pipe:
nodes[cls.__name__] = cls
if follower_cls is None:
follower_cls = cls
else:
leader_cls = follower_cls
follower_cls = cls
edges.add(
(
leader_cls.__name__,
follower_cls.__name__,
)
)
self.edges = list(edges)
self.nodes: Dict[str, str] = {}
for name in nodes:
topic = get_topic(nodes[name])
self.nodes[name] = topic
# Identify leaders and followers.
self.follows: Dict[str, List[str]] = defaultdict(list)
self.leads: Dict[str, List[str]] = defaultdict(list)
for edge in edges:
self.leads[edge[0]].append(edge[1])
self.follows[edge[1]].append(edge[0])
# Check followers are followers.
for name in self.follows:
if not issubclass(nodes[name], Follower):
raise TypeError("Not a follower class: %s" % nodes[name])
# Check each process is a process application class.
for name in self.processors:
if not issubclass(nodes[name], ProcessApplication):
raise TypeError("Not a process application class: %s" % nodes[name])
@property
def leaders(self) -> Iterable[str]:
return self.leads.keys()
@property
def leaders_only(self) -> Iterable[str]:
for name in self.leads.keys():
if name not in self.follows:
yield name
@property
def followers(self) -> Iterable[str]:
return self.follows.keys()
@property
def processors(self) -> Iterable[str]:
return set(self.leaders).intersection(self.followers)
def get_app_cls(self, name: str) -> Type[Application]:
cls = resolve_topic(self.nodes[name])
assert issubclass(cls, Application)
return cls
def leader_cls(self, name: str) -> Type[Leader]:
cls = self.get_app_cls(name)
if issubclass(cls, Leader):
return cls
else:
cls = type(
cls.__name__,
(Leader, cls),
{},
)
assert issubclass(cls, Leader)
return cls
def follower_cls(self, name: str) -> Type[Follower]:
cls = self.get_app_cls(name)
assert issubclass(cls, Follower)
return cls
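# Example (sketch): ``pipes`` is an iterable of pipelines, each a sequence of
# application classes listed in leader-to-follower order. With hypothetical
# classes Orders and Reservations,
#
#   system = System(pipes=[[Orders, Reservations, Orders]])
#
# registers both classes as nodes and adds the edges
# ('Orders', 'Reservations') and ('Reservations', 'Orders'), so each class
# both leads and follows and must therefore be a ProcessApplication.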
A = TypeVar("A")
class Runner(ABC):
"""
Abstract base class for system runners.
"""
def __init__(self, system: System):
self.system = system
self.is_started = False
@abstractmethod
def start(self) -> None:
"""
Starts the runner.
"""
if self.is_started:
raise RunnerAlreadyStarted()
self.is_started = True
@abstractmethod
def stop(self) -> None:
"""
Stops the runner.
"""
@abstractmethod
def get(self, cls: Type[A]) -> A:
"""
Returns an application instance for given application class.
"""
class RunnerAlreadyStarted(Exception):
"""
Raised when runner is already started.
"""
class SingleThreadedRunner(Runner, Promptable):
"""
Runs a :class:`System` in a single thread.
A single threaded runner is a runner, and so implements the
:func:`start`, :func:`stop`, and :func:`get` methods.
A single threaded runner is also a :class:`Promptable` object, and
implements the :func:`receive_prompt` method by collecting prompted
names.
"""
def __init__(self, system: System):
"""
Initialises runner with the given :class:`System`.
"""
super().__init__(system)
self.apps: Dict[str, Application] = {}
self.prompts_received: List[str] = []
self.is_prompting = False
def start(self) -> None:
"""
Starts the runner.
        The applications are constructed, and set up to lead and follow
        each other, according to the system definition.
        The followers are set up to follow the applications they follow
        (each has a notification log reader with the notification log of
        the leader), and their leaders are set up to lead the runner itself
(send prompts).
"""
super().start()
# Construct followers.
for name in self.system.followers:
self.apps[name] = self.system.follower_cls(name)()
# Construct leaders.
for name in self.system.leaders_only:
self.apps[name] = self.system.leader_cls(name)()
# Lead and follow.
for edge in self.system.edges:
leader = self.apps[edge[0]]
follower = self.apps[edge[1]]
assert isinstance(leader, Leader)
assert isinstance(follower, Follower)
leader.lead(self)
follower.follow(leader.__class__.__name__, leader.log)
def receive_prompt(self, leader_name: str) -> None:
"""
Receives prompt by appending name of
leader to list of prompted names.
Unless this method has previously been called but not
yet returned, it will then proceed to forward the prompts
received to its application by calling the application's
:func:`~Follower.pull_and_process` method for each prompted name.
"""
if leader_name not in self.prompts_received:
self.prompts_received.append(leader_name)
if not self.is_prompting:
self.is_prompting = True
while self.prompts_received:
prompt = self.prompts_received.pop(0)
for name in self.system.leads[prompt]:
follower = self.apps[name]
assert isinstance(follower, Follower)
follower.pull_and_process(prompt)
self.is_prompting = False
def stop(self) -> None:
self.apps.clear()
def get(self, cls: Type[A]) -> A:
app = self.apps[cls.__name__]
assert isinstance(app, cls)
return app
class MultiThreadedRunner(Runner):
"""
Runs a :class:`System` with a :class:`MultiThreadedRunnerThread` for each
follower in the system definition.
It is a runner, and so implements the :func:`start`, :func:`stop`,
and :func:`get` methods.
"""
def __init__(self, system: System):
"""
Initialises runner with the given :class:`System`.
"""
super().__init__(system)
self.apps: Dict[str, Application] = {}
self.threads: Dict[str, MultiThreadedRunnerThread] = {}
self.is_stopping = Event()
def start(self) -> None:
"""
Starts the runner.
        A multi-threaded runner thread is started for each
        'follower' application in the system, and an instance
        of each non-follower leader application is constructed.
        The followers are then set up to follow the applications
        they follow (each has a notification log reader with the
        notification log of the leader), and their leaders are set
        up to lead the follower's thread (send prompts).
"""
super().start()
# Construct followers.
for name in self.system.followers:
app_class = self.system.follower_cls(name)
thread = MultiThreadedRunnerThread(
app_class=app_class,
is_stopping=self.is_stopping,
)
self.threads[name] = thread
thread.start()
if (not thread.is_running.wait(timeout=5)) or thread.has_stopped.is_set():
self.stop()
raise Exception(f"Thread for '{app_class.__name__}' failed to start")
self.apps[name] = thread.app
# Construct non-follower leaders.
for name in self.system.leaders_only:
app = self.system.leader_cls(name)()
self.apps[name] = app
# Lead and follow.
for edge in self.system.edges:
leader = self.apps[edge[0]]
follower = self.apps[edge[1]]
assert isinstance(leader, Leader)
assert isinstance(follower, Follower)
follower.follow(leader.__class__.__name__, leader.log)
thread = self.threads[edge[1]]
leader.lead(thread)
def stop(self) -> None:
self.is_stopping.set()
for thread in self.threads.values():
thread.is_prompted.set()
thread.join()
@property
def has_stopped(self) -> bool:
return all([t.has_stopped.is_set() for t in self.threads.values()])
def get(self, cls: Type[A]) -> A:
app = self.apps[cls.__name__]
assert isinstance(app, cls)
return app
class MultiThreadedRunnerThread(Promptable, Thread):
"""
Runs one process application for a
:class:`~eventsourcing.system.MultiThreadedRunner`.
A multi-threaded runner thread is a :class:`~eventsourcing.system.Promptable`
object, and implements the :func:`receive_prompt` method by collecting
prompted names and setting its threading event 'is_prompted'.
A multi-threaded runner thread is a Python :class:`threading.Thread` object,
and implements the thread's :func:`run` method by waiting until the
'is_prompted' event has been set and then calling its process application's
:func:`~eventsourcing.system.Follower.pull_and_process`
method once for each prompted name. It is expected that
the process application will have been set up by the runner
with a notification log reader from which event notifications
will be pulled.
"""
def __init__(
self,
app_class: Type[Follower],
is_stopping: Event,
):
super().__init__()
self.app_class = app_class
self.is_stopping = is_stopping
self.has_stopped = Event()
self.has_errored = Event()
self.is_prompted = Event()
self.prompted_names: List[str] = []
self.prompted_names_lock = Lock()
self.setDaemon(True)
self.is_running = Event()
def run(self) -> None:
"""
Begins by constructing an application instance from
given application class and then loops forever until
stopped. The loop blocks on waiting for the 'is_prompted'
event to be set, then forwards the prompts already received
to its application by calling the application's
:func:`~Follower.pull_and_process` method for each prompted name.
"""
try:
self.app: Follower = self.app_class()
except Exception:
self.has_errored.set()
self.has_stopped.set()
raise
finally:
self.is_running.set() # pragma: no cover
# -----------------------^ weird branch coverage thing with Python 3.9
try:
while True:
self.is_prompted.wait()
if self.is_stopping.is_set():
self.has_stopped.set()
break
with self.prompted_names_lock:
prompted_names = self.prompted_names
self.prompted_names = []
self.is_prompted.clear()
for name in prompted_names:
self.app.pull_and_process(name)
except Exception:
self.has_errored.set()
self.has_stopped.set()
            self.is_stopping.set()  # stop the other runner threads as well
raise
def receive_prompt(self, leader_name: str) -> None:
"""
Receives prompt by appending name of
leader to list of prompted names.
"""
with self.prompted_names_lock:
if leader_name not in self.prompted_names:
self.prompted_names.append(leader_name)
self.is_prompted.set()
class NotificationLogReader:
"""
Reads domain event notifications from a notification log.
"""
DEFAULT_SECTION_SIZE = 10
def __init__(
self,
notification_log: NotificationLog,
section_size: int = DEFAULT_SECTION_SIZE,
):
"""
Initialises a reader with the given notification log,
and optionally a section size integer which determines
the requested number of domain event notifications in
each section retrieved from the notification log.
"""
self.notification_log = notification_log
self.section_size = section_size
def read(self, *, start: int) -> Iterator[Notification]:
"""
Returns a generator that yields event notifications
from the reader's notification log, starting from
given start position (a notification ID).
This method traverses the linked list of sections presented by
a notification log, and yields the individual event notifications
that are contained in each section. When all the event notifications
from a section have been yielded, the reader will retrieve the next
section, and continue yielding event notification until all subsequent
event notifications in the notification log from the start position
have been yielded.
"""
section_id = "{},{}".format(start, start + self.section_size - 1)
while True:
section: Section = self.notification_log[section_id]
for item in section.items:
# Todo: Reintroduce if supporting
# sections with regular alignment?
# if item.id < start:
# continue
yield item
if section.next_id is None:
break
else:
section_id = section.next_id
def select(self, *, start: int) -> Iterator[Notification]:
"""
Returns a generator that yields event notifications
from the reader's notification log, starting from
given start position (a notification ID).
This method selects a limited list of notifications from a
notification log and yields event notifications individually.
When all the event notifications in the list are yielded,
the reader will retrieve another list, and continue yielding
event notification until all subsequent event notifications
in the notification log from the start position have been
yielded.
"""
while True:
notifications = self.notification_log.select(start, self.section_size)
for notification in notifications:
yield notification
if len(notifications) < self.section_size:
break
else:
start = notifications[-1].id + 1
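# Usage sketch (assuming two hypothetical ProcessApplication subclasses,
# Orders and Reservations, each implementing a policy() method):
#
#   system = System(pipes=[[Orders, Reservations, Orders]])
#   runner = SingleThreadedRunner(system)
#   runner.start()
#   orders = runner.get(Orders)
#   # ... save aggregates with the Orders application; its followers are
#   # prompted and pull_and_process() runs their policies ...
#   runner.stop()
#
# MultiThreadedRunner(system) can be used instead to run each follower
# application in its own thread.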
|
|
#!/usr/bin/python -Wall
# ================================================================
# See the paper ... this is hard to describe without a picture! :)
# ----------------------------------------------------------------
# John Kerl
# kerl.john.r@gmail.com
# 2010-04-15
# ================================================================
from __future__ import division # 7/2 = 3.5, not 3
from math import * # ceil and floor
import sys, re, copy
import tabutil_m, stats_m
eps = 1e-8
# ----------------------------------------------------------------
def get_H_bonds(p, q):
H_bonds = []
for y in range(0, p): # 0, 1, ..., p-1
x1 = int(floor(q/p * (y + eps)))
x2 = int(ceil (q/p * (y + eps)))
left = [x1, y] # above
right = [x2, y] # below
H_bonds.append([left, right])
return H_bonds
def get_V_bonds(p, q):
V_bonds = []
for x in range(1, q+1): # 1, 2, ..., q
y1 = int(floor(p*x/q - eps))
y2 = int(ceil (p*x/q - eps))
up = [x, y2] # above
down = [x, y1] # below
V_bonds.append([up, down])
return V_bonds
def get_H_and_V_bonds(p, q):
return get_H_bonds(p, q) + get_V_bonds(p, q)
def get_points_above(p, q):
points_above = []
for x in range(0, q+1): # i.e. 0 to q inclusive
y = int(ceil (p*x/q - eps))
points_above.append([x, y])
return points_above
def get_points_below(p, q):
points_below = []
for x in range(0, q+1): # i.e. 0 to q inclusive
y = int(floor (p*x/q - eps))
points_below.append([x, y])
return points_below
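# Worked example (a sketch, relying on the eps nudges above): for the line of
# slope p/q with (p, q) = (1, 1),
#   get_H_bonds(1, 1) -> [[[0, 0], [1, 0]]]   (one horizontal bond at y = 0)
#   get_V_bonds(1, 1) -> [[[1, 1], [1, 0]]]   (one vertical bond at x = 1)
# and get_H_and_V_bonds(1, 1) concatenates them, H bonds first.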
# ----------------------------------------------------------------
def look_up_in_hash(hash, above_or_below, p, q, x0, y0, N):
try:
c = hash[((p, q), (x0, y0), N)]
return c
	except KeyError:
print >> sys.stderr, \
'z0=(%d,%d) not found %s p=%d, q=%d, N=%d.' % \
(x0, y0, above_or_below, p, q, N)
sys.exit(1)
# ----------------------------------------------------------------
def get_wbar_of_Npq(p, q, N, bonds, above_counts_hash, below_counts_hash, \
include_above=True, include_below=True):
sum = 0.0
num_bonds = len(bonds)
for [start, end] in bonds:
[start_x0, start_y0] = start # above
[end_x0, end_y0] = end # below
c1 = 1
c2 = 2
if include_above:
c1 = look_up_in_hash(above_counts_hash, 'above', p, q, \
start_x0, start_y0, N)
if include_below:
c2 = look_up_in_hash(below_counts_hash, 'below', p, q, \
end_x0, end_y0, N)
sum += c1 * c2
#return sum / sqrt(p**2+q**2)
return sum / num_bonds
# ----------------------------------------------------------------
def get_above_scaled_count_of_Npq(p, q, N, points_above, above_counts_hash):
sum = 0.0
num_points = len(points_above)
for [x, y] in points_above:
c1 = look_up_in_hash(above_counts_hash, 'above', p, q, x, y, N)
sum += c1
#return sum
#return sum / sqrt(p**2+q**2)
return sum / num_points
# ----------------------------------------------------------------
def get_below_scaled_count_of_Npq(p, q, N, points_below, below_counts_hash):
sum = 0.0
num_points = len(points_below)
for [x, y] in points_below:
c2 = look_up_in_hash(below_counts_hash, 'below', p, q, x, y, N)
sum += c2
#return sum
#return sum / sqrt(p**2+q**2)
return sum / num_points
# ----------------------------------------------------------------
# Example input: data/raw_counts_1_3.txt
#
# #N (0,0) (1,1) (2,1)
# #- -------- -------- --------
# 10 4268 6300 5570
# 11 11379 16742 14334
# 12 29472 43472 38316
# 13 78434 115421 99117
# 14 203739 300433 264235
# 15 541422 797137 685851
# 16 1409539 2078243 1824875
# 17 3741997 5510907 4748493
# 18 9758256 14387069 12617423
# 19 25885698 38130894 32891904
# 20 67592411 99655040 87310954
# Hash all these data on the key
#
# ((p, q), (x0, y0), N)
def load_counts_hash(pqs, above_or_below, datadir):
counts_hash = {}
for [p, q] in pqs:
counts_file = '%s/raw_counts_%s_%d_%d.txt' \
% (datadir, above_or_below, p, q)
# Read the data file
Ncc_columns = tabutil_m.float_columns_from_file(counts_file)
Ns = Ncc_columns[0]
Ncc_rows = tabutil_m.float_rows_from_file (counts_file)
z0_labels = tabutil_m.labels_from_file (counts_file)
# Omit the 'N' label.
z0_labels = z0_labels[1:]
num_rows = len(Ncc_rows)
num_columns = len(z0_labels)
# Parse the column labels as (x0,y0) pairs
x0y0_pairs = []
for z0_label in z0_labels:
[x0, y0] = re.split(',', re.sub('[()]', '', z0_label))
[x0, y0] = [int(x0), int(y0)]
x0y0_pairs.append([x0, y0])
for i in range(0, num_rows):
N = int(Ns[i])
for j in range(0, num_columns):
[x0, y0] = x0y0_pairs[j]
key = ((p, q), (x0, y0), N)
data = Ncc_rows[i][j+1]
counts_hash[key] = data
return counts_hash
def get_Ns(datadir, p, q):
counts_file = '%s/raw_counts_%s_%d_%d.txt' % (datadir, 'above', p, q)
Ncc_columns = tabutil_m.float_columns_from_file(counts_file)
return Ncc_columns[0]
# ----------------------------------------------------------------
# Need counts data loaded into a hash by:
# ((p, q), (x0, y0), N)
# Example input: data/raw_counts_above_1_3.txt
#
# #N (0,0) (1,1) (2,1)
# #- -------- -------- --------
# 10 4268 6300 5570
# 11 11379 16742 14334
# 12 29472 43472 38316
# 13 78434 115421 99117
# 14 203739 300433 264235
# 15 541422 797137 685851
# 16 1409539 2078243 1824875
# 17 3741997 5510907 4748493
# 18 9758256 14387069 12617423
# 19 25885698 38130894 32891904
# 20 67592411 99655040 87310954
# ----------------------------------------------------------------
pqs = [
[0, 1],
[1, 10],
[1, 9],
[1, 8],
[1, 7],
[1, 6],
[1, 5],
[2, 9],
[1, 4],
[2, 7],
[3, 10],
[1, 3],
[3, 8],
[2, 5],
[3, 7],
[4, 9],
[1, 2],
[3, 6],
[5, 9],
[4, 7],
[3, 5],
[5, 8],
[2, 3],
[7, 10],
[5, 7],
[3, 4],
[7, 9],
[4, 5],
[5, 6],
[6, 7],
[7, 8],
[8, 9],
[9, 10],
[1, 1],
]
print_counts_hashes = 0
print_raw_bonds = 1
datadir = './data'
if len(sys.argv) == 2:
datadir = sys.argv[1]
above_counts_hash = load_counts_hash(pqs, 'above', datadir)
below_counts_hash = load_counts_hash(pqs, 'below', datadir)
if print_counts_hashes:
print 'above counts hash:'
for key in above_counts_hash.keys():
print key, above_counts_hash[key]
print
print 'below counts hash:'
for key in below_counts_hash.keys():
print key, below_counts_hash[key]
print
thetas = []
wbar_output_rows = []
aw_output_rows = []
bw_output_rows = []
ac_output_rows = []
bc_output_rows = []
for [p, q] in pqs:
theta = atan(p/q) * 180/pi
thetas.append(theta)
wbar_output_row = []
aw_output_row = []
bw_output_row = []
ac_output_row = []
bc_output_row = []
bonds = get_H_and_V_bonds(p, q)
points_above = get_points_above(p, q)
points_below = get_points_below(p, q)
if print_raw_bonds:
print 'Bonds for (p,q) = (%d,%d):' % (p, q)
for bond in bonds:
print bond
print
print 'Points above:'
for [x, y] in points_above:
print ' (%d,%d)' % (x, y)
print
print 'Points below:'
for [x, y] in points_below:
print ' (%d,%d)' % (x, y)
print
Ns = get_Ns(datadir, p, q)
wNs = map(lambda N: 2*N+1, Ns)
#wNs = map(lambda N: 2*N, Ns)
for N in Ns:
wbar = get_wbar_of_Npq(p, q, N, bonds, \
above_counts_hash, below_counts_hash, \
include_above=True, include_below=True)
wbar_output_row.append(wbar)
for N in Ns:
above_wbar = get_wbar_of_Npq(p, q, N, bonds, \
above_counts_hash, below_counts_hash, \
include_above=True, include_below=False)
below_wbar = get_wbar_of_Npq(p, q, N, bonds, \
above_counts_hash, below_counts_hash, \
include_above=False, include_below=True)
above_scaled_count = get_above_scaled_count_of_Npq(p, q, N, \
points_above, above_counts_hash)
below_scaled_count = get_below_scaled_count_of_Npq(p, q, N, \
points_below, below_counts_hash)
aw_output_row.append(above_wbar)
bw_output_row.append(below_wbar)
ac_output_row.append(above_scaled_count)
bc_output_row.append(below_scaled_count)
wbar_output_rows.append(wbar_output_row)
aw_output_rows.append(aw_output_row)
bw_output_rows.append(bw_output_row)
ac_output_rows.append(ac_output_row)
bc_output_rows.append(bc_output_row)
print 'Using data directory "%s"' % (datadir)
iospecs = [
[wNs, wbar_output_rows,'w_theta_with_N_series.txt', '%d', False],
[Ns, aw_output_rows, 'aw_theta_with_N_series.txt', '%d', False],
[Ns, bw_output_rows, 'bw_theta_with_N_series.txt', '%d', False],
[Ns, ac_output_rows, 'ac_theta_with_N_series.txt', '%d', False],
[Ns, bc_output_rows, 'bc_theta_with_N_series.txt', '%d', False],
[wNs, wbar_output_rows,'w_N_with_theta_series.txt', '%.4f', True],
[Ns, aw_output_rows, 'aw_N_with_theta_series.txt', '%.4f', True],
[Ns, bw_output_rows, 'bw_N_with_theta_series.txt', '%.4f', True],
[Ns, ac_output_rows, 'ac_N_with_theta_series.txt', '%.4f', True],
[Ns, bc_output_rows, 'bc_N_with_theta_series.txt', '%.4f', True]]
for [xNs, output_rows, output_file_name, label_format, transpose] \
in iospecs:
tabutil_m.matrix_and_labels_to_file(
output_rows, # matrix
'theta', # row_index_name
thetas, # row_index_values
'N', # col_index_name
xNs, # col_index_values
output_file_name, # file_name
transpose,
matrix_format='%11.7f',
label_format=label_format)
print 'Wrote %s' % (output_file_name)
# ----------------------------------------------------------------
# Now do w_theta_over_w_zero
w_theta_over_w_zero = copy.deepcopy(wbar_output_rows)
output_file_name = 'w_theta_w_zero.txt'
nr = len(w_theta_over_w_zero)
nc = len(w_theta_over_w_zero[0])
for i in range(0, nr):
for j in range(0, nc):
w_theta_over_w_zero[i][j] = wbar_output_rows[i][j] / wbar_output_rows[0][j]
iospecs = [
['w_ratio_theta_with_N_series.txt', False],
['w_ratio_N_with_theta_series.txt', True]]
for [output_file_name, transpose] in iospecs:
tabutil_m.matrix_and_labels_to_file(
w_theta_over_w_zero, # matrix
'theta', # row_index_name
thetas, # row_index_values
'N', # col_index_name
wNs, # col_index_values
output_file_name, # file_name
transpose=transpose,
matrix_format='%11.7f',
		label_format=label_format)  # note: label_format here is '%.4f', left over from the loop above
print 'Wrote %s' % (output_file_name)
|
|
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to enforce different constraints on flags.
Flags validators can be registered using following functions / decorators:
flags.register_validator
@flags.validator
flags.register_multi_flags_validator
@flags.multi_flags_validator
Three convenience functions are also provided for common flag constraints:
flags.mark_flag_as_required
flags.mark_flags_as_required
flags.mark_flags_as_mutual_exclusive
See their docstring in this module for a usage manual.
Do NOT import this module directly. Import the flags package and use the
aliases defined at the package level instead.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
from absl.flags import _exceptions
from absl.flags import _flagvalues
class Validator(object):
"""Base class for flags validators.
Users should NOT overload these classes, and use flags.Register...
methods instead.
"""
  # Used to assign each validator a unique insertion_index
validators_count = 0
def __init__(self, checker, message):
"""Constructor to create all validators.
Args:
checker: function to verify the constraint.
Input of this method varies, see SingleFlagValidator and
multi_flags_validator for a detailed description.
message: str, error message to be shown to the user.
"""
self.checker = checker
self.message = message
Validator.validators_count += 1
# Used to assert validators in the order they were registered.
self.insertion_index = Validator.validators_count
def verify(self, flag_values):
"""Verifies that constraint is satisfied.
flags library calls this method to verify Validator's constraint.
Args:
flag_values: flags.FlagValues, the FlagValues instance to get flags from.
Raises:
Error: Raised if constraint is not satisfied.
"""
param = self._get_input_to_checker_function(flag_values)
if not self.checker(param):
raise _exceptions.ValidationError(self.message)
def get_flags_names(self):
"""Returns the names of the flags checked by this validator.
Returns:
[string], names of the flags.
"""
raise NotImplementedError('This method should be overloaded')
def print_flags_with_values(self, flag_values):
raise NotImplementedError('This method should be overloaded')
def _get_input_to_checker_function(self, flag_values):
"""Given flag values, returns the input to be given to checker.
Args:
flag_values: flags.FlagValues, containing all flags.
Returns:
The input to be given to checker. The return type depends on the specific
validator.
"""
raise NotImplementedError('This method should be overloaded')
class SingleFlagValidator(Validator):
"""Validator behind register_validator() method.
Validates that a single flag passes its checker function. The checker function
takes the flag value and returns True (if value looks fine) or, if flag value
is not valid, either returns False or raises an Exception.
"""
def __init__(self, flag_name, checker, message):
"""Constructor.
Args:
flag_name: string, name of the flag.
checker: function to verify the validator.
input - value of the corresponding flag (string, boolean, etc).
output - bool, True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise flags.ValidationError(desired_error_message).
message: str, error message to be shown to the user if validator's
condition is not satisfied.
"""
super(SingleFlagValidator, self).__init__(checker, message)
self.flag_name = flag_name
def get_flags_names(self):
return [self.flag_name]
def print_flags_with_values(self, flag_values):
return 'flag --%s=%s' % (self.flag_name, flag_values[self.flag_name].value)
def _get_input_to_checker_function(self, flag_values):
"""Given flag values, returns the input to be given to checker.
Args:
flag_values: flags.FlagValues, the FlagValues instance to get flags from.
Returns:
object, the input to be given to checker.
"""
return flag_values[self.flag_name].value
class MultiFlagsValidator(Validator):
"""Validator behind register_multi_flags_validator method.
Validates that flag values pass their common checker function. The checker
function takes flag values and returns True (if values look fine) or,
if values are not valid, either returns False or raises an Exception.
"""
def __init__(self, flag_names, checker, message):
"""Constructor.
Args:
flag_names: [str], containing names of the flags used by checker.
checker: function to verify the validator.
input - dict, with keys() being flag_names, and value for each
key being the value of the corresponding flag (string, boolean,
etc).
output - bool, True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise flags.ValidationError(desired_error_message).
message: str, error message to be shown to the user if validator's
condition is not satisfied
"""
super(MultiFlagsValidator, self).__init__(checker, message)
self.flag_names = flag_names
def _get_input_to_checker_function(self, flag_values):
"""Given flag values, returns the input to be given to checker.
Args:
flag_values: flags.FlagValues, the FlagValues instance to get flags from.
Returns:
      dict, with keys() being self.flag_names, and value for each key
being the value of the corresponding flag (string, boolean, etc).
"""
return dict([key, flag_values[key].value] for key in self.flag_names)
def print_flags_with_values(self, flag_values):
prefix = 'flags '
flags_with_values = []
for key in self.flag_names:
flags_with_values.append('%s=%s' % (key, flag_values[key].value))
return prefix + ', '.join(flags_with_values)
def get_flags_names(self):
return self.flag_names
def register_validator(flag_name,
checker,
message='Flag validation failed',
flag_values=_flagvalues.FLAGS):
"""Adds a constraint, which will be enforced during program execution.
The constraint is validated when flags are initially parsed, and after each
change of the corresponding flag's value.
Args:
flag_name: str, name of the flag to be checked.
checker: callable, a function to validate the flag.
input - A single positional argument: The value of the corresponding
        flag (string, boolean, etc.). This value will be passed to the checker
        by the library.
output - bool, True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise flags.ValidationError(desired_error_message).
message: str, error text to be shown to the user if checker returns False.
If checker raises flags.ValidationError, message from the raised
error will be shown.
flag_values: flags.FlagValues, optional FlagValues instance to validate
against.
Raises:
AttributeError: Raised when flag_name is not registered as a valid flag
name.
"""
v = SingleFlagValidator(flag_name, checker, message)
_add_validator(flag_values, v)
def validator(flag_name, message='Flag validation failed',
flag_values=_flagvalues.FLAGS):
"""A function decorator for defining a flag validator.
Registers the decorated function as a validator for flag_name, e.g.
@flags.validator('foo')
def _CheckFoo(foo):
...
See register_validator() for the specification of checker function.
Args:
flag_name: str, name of the flag to be checked.
message: str, error text to be shown to the user if checker returns False.
If checker raises flags.ValidationError, message from the raised
error will be shown.
flag_values: flags.FlagValues, optional FlagValues instance to validate
against.
Returns:
A function decorator that registers its function argument as a validator.
Raises:
AttributeError: Raised when flag_name is not registered as a valid flag
name.
"""
def decorate(function):
register_validator(flag_name, function,
message=message,
flag_values=flag_values)
return function
return decorate
def register_multi_flags_validator(flag_names,
multi_flags_checker,
message='Flags validation failed',
flag_values=_flagvalues.FLAGS):
"""Adds a constraint to multiple flags.
The constraint is validated when flags are initially parsed, and after each
change of the corresponding flag's value.
Args:
flag_names: [str], a list of the flag names to be checked.
multi_flags_checker: callable, a function to validate the flag.
input - dict, with keys() being flag_names, and value for each key
being the value of the corresponding flag (string, boolean, etc).
output - bool, True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise flags.ValidationError.
message: str, error text to be shown to the user if checker returns False.
If checker raises flags.ValidationError, message from the raised
error will be shown.
flag_values: flags.FlagValues, optional FlagValues instance to validate
against.
Raises:
AttributeError: Raised when a flag is not registered as a valid flag name.
"""
v = MultiFlagsValidator(
flag_names, multi_flags_checker, message)
_add_validator(flag_values, v)
def multi_flags_validator(flag_names,
message='Flag validation failed',
flag_values=_flagvalues.FLAGS):
"""A function decorator for defining a multi-flag validator.
Registers the decorated function as a validator for flag_names, e.g.
@flags.multi_flags_validator(['foo', 'bar'])
def _CheckFooBar(flags_dict):
...
See register_multi_flags_validator() for the specification of checker
function.
Args:
flag_names: [str], a list of the flag names to be checked.
message: str, error text to be shown to the user if checker returns False.
If checker raises flags.ValidationError, message from the raised
error will be shown.
flag_values: flags.FlagValues, optional FlagValues instance to validate
against.
Returns:
A function decorator that registers its function argument as a validator.
Raises:
AttributeError: Raised when a flag is not registered as a valid flag name.
"""
def decorate(function):
register_multi_flags_validator(flag_names,
function,
message=message,
flag_values=flag_values)
return function
return decorate
def mark_flag_as_required(flag_name, flag_values=_flagvalues.FLAGS):
"""Ensures that flag is not None during program execution.
Registers a flag validator, which will follow usual validator rules.
Important note: validator will pass for any non-None value, such as False,
0 (zero), '' (empty string) and so on.
It is recommended to call this method like this:
if __name__ == '__main__':
flags.mark_flag_as_required('your_flag_name')
app.run()
Because validation happens at app.run() we want to ensure required-ness
is enforced at that time. However, you generally do not want to force
users who import your code to have additional required flags for their
own binaries or tests.
Args:
flag_name: str, name of the flag
flag_values: flags.FlagValues, optional FlagValues instance where the flag
is defined.
Raises:
AttributeError: Raised when flag_name is not registered as a valid flag
name.
"""
if flag_values[flag_name].default is not None:
warnings.warn(
'Flag --%s has a non-None default value; therefore, '
'mark_flag_as_required will pass even if flag is not specified in the '
'command line!' % flag_name)
register_validator(flag_name,
lambda value: value is not None,
message='Flag --%s must be specified.' % flag_name,
flag_values=flag_values)
def mark_flags_as_required(flag_names, flag_values=_flagvalues.FLAGS):
"""Ensures that flags are not None during program execution.
Recommended usage:
if __name__ == '__main__':
flags.mark_flags_as_required(['flag1', 'flag2', 'flag3'])
app.run()
Args:
flag_names: Sequence[str], names of the flags.
flag_values: flags.FlagValues, optional FlagValues instance where the flags
are defined.
Raises:
    AttributeError: If any of the flag names has not already been defined as a flag.
"""
for flag_name in flag_names:
mark_flag_as_required(flag_name, flag_values)
def mark_flags_as_mutual_exclusive(flag_names, required=False,
flag_values=_flagvalues.FLAGS):
"""Ensures that only one flag among flag_names is set.
Important note: validator will pass for any non-None value, such as False,
0 (zero), '' (empty string) and so on. For multi flags, this means that the
default needs to be None not [].
Args:
flag_names: [str], names of the flags.
required: bool, if set, exactly one of the flags must be set.
Otherwise, it is also valid for none of the flags to be set.
flag_values: flags.FlagValues, optional FlagValues instance where the flags
are defined.
"""
def validate_mutual_exclusion(flags_dict):
flag_count = sum(1 for val in flags_dict.values() if val is not None)
if flag_count == 1 or (not required and flag_count == 0):
return True
message = ('%s one of (%s) must be specified.' %
('Exactly' if required else 'At most', ', '.join(flag_names)))
raise _exceptions.ValidationError(message)
register_multi_flags_validator(
flag_names, validate_mutual_exclusion, flag_values=flag_values)
def _add_validator(fv, validator_instance):
"""Register new flags validator to be checked.
Args:
fv: flags.FlagValues, the FlagValues instance to add the validator.
validator_instance: validators.Validator, the validator to add.
Raises:
KeyError: Raised when validators work with a non-existing flag.
"""
for flag_name in validator_instance.get_flags_names():
fv[flag_name].validators.append(validator_instance)
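# Usage sketch (the flag name is hypothetical; in application code these
# functions are normally reached through the package-level flags aliases):
#
#   from absl import flags
#
#   flags.DEFINE_integer('port', None, 'Port to listen on.')
#   flags.register_validator(
#       'port',
#       lambda value: value is None or 1024 <= value <= 65535,
#       message='--port must be in the range [1024, 65535].')
#   flags.mark_flag_as_required('port')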
|
|
"""Tests the experiment module of pyexperiment
Written by Peter Duerr
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import unittest
import argparse
import io
import mock
import tempfile
import logging
from pyexperiment import experiment
from pyexperiment.utils.stdout_redirector import stdout_redirector
from pyexperiment import state
from pyexperiment import conf
from pyexperiment import Logger
class TestExperiment(unittest.TestCase):
"""Test the experiment module
"""
def setUp(self):
"""Setup the test fixture
"""
pass
def tearDown(self):
"""Tear down the test fixture
"""
pass
def test_main_runs_function(self):
"""Test running main calls function
"""
run = [False]
def custom_function():
"""User function
"""
run[0] = True
# Monkey patch arg parser here
argparse._sys.argv = [ # pylint: disable=W0212
"test", "custom_function"]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(commands=[custom_function])
self.assertTrue(run[0])
self.assertEqual(len(buf.getvalue()), 0)
def test_main_shows_commands(self):
"""Test running main shows commands
"""
def custom_function1():
"""User function
"""
pass
def custom_function2():
"""User function
"""
pass
# Monkey patch arg parser here
argparse._sys.argv = [ # pylint: disable=W0212
"test", "show_commands"]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(commands=[custom_function1, custom_function2])
self.assertNotEqual(len(buf.getvalue()), 0)
self.assertRegexpMatches(buf.getvalue(), r"custom_function1")
self.assertRegexpMatches(buf.getvalue(), r"custom_function2")
def test_main_not_enough_arguments(self):
"""Test running main without command
"""
# Monkey patch arg parser here
argparse._sys.argv = [ # pylint: disable=W0212
"test"]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main()
self.assertNotEqual(len(buf.getvalue()), 0)
self.assertRegexpMatches(buf.getvalue(), r"[Nn]ot enough arguments")
def test_main_does_not_run_function(self):
"""Test running main does not call unnecessary function but complains
"""
run = [False]
def custom_function():
"""User function
"""
run[0] = True
# Monkey patch arg parser here
argparse._sys.argv = [ # pylint: disable=W0212
"test", "help"]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(commands=[custom_function])
self.assertFalse(run[0])
self.assertNotEqual(len(buf.getvalue()), 0)
def test_main_gives_help(self):
"""Test running help shows docstring
"""
run = [False]
def custom_function():
"""This should be printed!!
"""
run[0] = True
# Monkey patch arg parser here
argparse._sys.argv = [ # pylint: disable=W0212
"test", "help", "custom_function"]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(commands=[custom_function])
self.assertFalse(run[0])
self.assertIn("This should be printed!!", buf.getvalue())
def test_main_complains_on_help(self):
"""Test running help complains on help for wrong command
"""
def custom_function():
"""Foo function
"""
pass
# Monkey patch arg parser here
argparse._sys.argv = [ # pylint: disable=W0212
"test", "help", "foo"]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(commands=[custom_function])
self.assertRegexpMatches(buf.getvalue(), r"[cC]ommand")
self.assertRegexpMatches(buf.getvalue(), r"not")
self.assertRegexpMatches(buf.getvalue(), r"foo")
def test_main_runs_test(self):
"""Test running main calls tests when needed
"""
class ExampleTest(unittest.TestCase):
"""Test case for the test
"""
pass
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "test"]
with mock.patch.object(unittest, 'TextTestRunner') as mock_method:
experiment.main(commands=[], tests=[ExampleTest])
self.assertEqual(mock_method.call_count, 1)
def test_main_shows_test(self):
"""Test running main shows tests when needed
"""
class ExampleTest(unittest.TestCase):
"""Test case for the test
"""
pass
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "show_tests"]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(tests=[ExampleTest])
self.assertRegexpMatches(buf.getvalue(), r"ExampleTest")
def test_main_doesnt_test_on_help(self):
"""Test running main does not call tests when not needed
"""
class ExampleTest(unittest.TestCase):
"""Test case for the test
"""
pass
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "-h"]
buf = io.StringIO()
with stdout_redirector(buf):
with mock.patch.object(unittest, 'TextTestRunner') as mock_method:
try:
experiment.main(commands=[], tests=[ExampleTest])
self.assertEqual(mock_method.call_count, 0)
except SystemExit:
pass
@mock.patch('pyexperiment.experiment.embed_interactive')
def test_main_runs_interactive(self, mock_interactive):
"""Test running main runs interactive session
"""
argparse._sys.argv = [ # pylint: disable=W0212
"test", "--interactive"]
experiment.main(commands=[], tests=[])
self.assertTrue(mock_interactive.call_count == 1)
def test_main_shows_empty_state(self):
"""Test running main shows empty state
"""
with tempfile.NamedTemporaryFile() as temp:
state['bla'] = 12
del state['bla']
state.save(temp.name)
spec = ('[pyexperiment]\n'
'state_filename = string(default=%s)' % temp.name)
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "show_state"]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(config_spec=spec)
self.assertRegexpMatches(buf.getvalue(), r"[Ss]tate empty")
def test_main_shows_default_state(self):
"""Test running main shows the default state
"""
with tempfile.NamedTemporaryFile() as temp:
state['bla'] = 12
state.save(temp.name)
spec = ('[pyexperiment]\n'
'state_filename = string(default=%s)' % temp.name)
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "show_state"]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(config_spec=spec)
self.assertRegexpMatches(buf.getvalue(), r"bla")
self.assertRegexpMatches(buf.getvalue(), r"12")
def test_main_shows_other_state(self):
"""Test running main shows state from file
"""
with tempfile.NamedTemporaryFile() as temp:
state['foo'] = 42
state.save(temp.name)
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "show_state", temp.name]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main()
self.assertRegexpMatches(buf.getvalue(), r"foo")
self.assertRegexpMatches(buf.getvalue(), r"42")
def test_main_overrides_option(self):
"""Test running main called with -o works as expected
"""
called = [False]
def foo_fun():
"""Foo function
"""
called[0] = True
self.assertEqual(conf['bla'], 'foo')
conf['bla'] = 'bla'
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "-o", "bla", "foo", "foo_fun"]
self.assertFalse(called[0])
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(commands=[foo_fun])
self.assertTrue(called[0])
self.assertEqual(conf['bla'], 'foo')
def test_main_overrides_verbosity(self):
"""Test running main called with --verbosity works as expected
"""
log_stream = io.StringIO()
Logger.CONSOLE_STREAM_HANDLER = logging.StreamHandler(log_stream)
called = [False]
def foo_fun():
"""Foo function
"""
called[0] = True
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "--verbosity", "DEBUG", "foo_fun"]
self.assertFalse(called[0])
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(commands=[foo_fun])
self.assertTrue(called[0])
self.assertEqual(conf['pyexperiment.verbosity'], 'DEBUG')
called[0] = False
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "--verbosity", "WARNING", "foo_fun"]
self.assertFalse(called[0])
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(commands=[foo_fun])
self.assertTrue(called[0])
self.assertEqual(conf['pyexperiment.verbosity'], 'WARNING')
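# Usage sketch (a hypothetical user script, mirroring the pattern exercised by
# the tests above):
#
#   from pyexperiment import experiment
#
#   def run():
#       """Run the experiment"""
#       ...
#
#   if __name__ == '__main__':
#       experiment.main(commands=[run])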
|
|
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""NAS-FPN.
Golnaz Ghiasi, Tsung-Yi Lin, Ruoming Pang, Quoc V. Le.
NAS-FPN: Learning Scalable Feature Pyramid Architecture for Object Detection.
https://arxiv.org/abs/1904.07392. CVPR 2019.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl import logging
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from modeling.architecture import nn_blocks
from modeling.architecture import nn_ops
from modeling.architecture import resnet
from ops import spatial_transform_ops
# The fixed NAS-FPN architecture discovered by NAS.
# Each element represents a specification of a building block:
# (block_level, combine_fn, (input_offset0, input_offset1), is_output).
NASFPN_BLOCK_SPECS = [
(4, 'attention', (1, 3), False),
(4, 'sum', (1, 5), False),
(3, 'sum', (0, 6), True),
(4, 'sum', (6, 7), True),
(5, 'attention', (7, 8), True),
(7, 'attention', (6, 9), True),
(6, 'attention', (9, 10), True),
]
class BlockSpec(object):
"""A container class that specifies the block configuration for NAS-FPN."""
def __init__(self, level, combine_fn, input_offsets, is_output):
self.level = level
self.combine_fn = combine_fn
self.input_offsets = input_offsets
self.is_output = is_output
def build_block_specs(block_specs=None):
"""Builds the list of BlockSpec objects for NAS-FPN."""
if not block_specs:
block_specs = NASFPN_BLOCK_SPECS
logging.info('Building NAS-FPN block specs: %s', block_specs)
return [BlockSpec(*b) for b in block_specs]
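# Example (sketch): with the default NASFPN_BLOCK_SPECS above, build_block_specs()
# returns seven BlockSpec objects; the first has level=4, combine_fn='attention',
# input_offsets=(1, 3) and is_output=False.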
def block_group(inputs,
filters,
strides,
block_fn,
block_repeats,
conv2d_op=None,
activation=tf.nn.swish,
batch_norm_activation=nn_ops.BatchNormActivation(),
dropblock=nn_ops.Dropblock(),
drop_connect_rate=None,
data_format='channels_last',
name=None,
is_training=False):
"""Creates one group of blocks for NAS-FPN."""
if block_fn == 'conv':
inputs = conv2d_op(
inputs,
filters=filters,
kernel_size=(3, 3),
padding='same',
data_format=data_format,
name='conv')
inputs = batch_norm_activation(
inputs, is_training=is_training, relu=False, name='bn')
inputs = dropblock(inputs, is_training=is_training)
return inputs
if block_fn != 'bottleneck':
raise ValueError('Block function {} not implemented.'.format(block_fn))
_, _, _, num_filters = inputs.get_shape().as_list()
block_fn = nn_blocks.bottleneck_block
use_projection = not (num_filters == (filters * 4) and strides == 1)
return resnet.block_group(
inputs=inputs,
filters=filters,
strides=strides,
use_projection=use_projection,
block_fn=block_fn,
block_repeats=block_repeats,
activation=activation,
batch_norm_activation=batch_norm_activation,
dropblock=dropblock,
drop_connect_rate=drop_connect_rate,
data_format=data_format,
name=name,
is_training=is_training)
def resample_feature_map(feat,
level,
target_level,
is_training,
target_feat_dims=256,
conv2d_op=tf.layers.conv2d,
batch_norm_activation=nn_ops.BatchNormActivation(),
data_format='channels_last',
name=None):
"""Resample input feature map to have target number of channels and width."""
feat_dims = feat.get_shape().as_list()[3]
with tf.variable_scope('resample_{}'.format(name)):
if feat_dims != target_feat_dims:
feat = conv2d_op(
feat,
filters=target_feat_dims,
kernel_size=(1, 1),
padding='same',
data_format=data_format)
feat = batch_norm_activation(
feat,
is_training=is_training,
relu=False,
name='bn')
if level < target_level:
stride = int(2**(target_level-level))
feat = tf.layers.max_pooling2d(
inputs=feat,
pool_size=stride,
strides=[stride, stride],
padding='SAME')
elif level > target_level:
scale = int(2**(level - target_level))
feat = spatial_transform_ops.nearest_upsampling(feat, scale=scale)
return feat
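# Illustrative note (added commentary): with the power-of-two level convention
# used here, resampling a level-3 feature to level 5 applies a stride-4 max
# pool, level 5 to level 3 applies 4x nearest-neighbor upsampling, and equal
# levels only adjust channels when feat_dims != target_feat_dims.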
def global_attention(feat0, feat1):
with tf.variable_scope('global_attention'):
m = tf.reduce_max(feat0, axis=[1, 2], keepdims=True)
m = tf.sigmoid(m)
return feat0 + feat1 * m
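# Illustrative note (added commentary): global_attention computes
#   out = feat0 + feat1 * sigmoid(max(feat0 over spatial dims))
# i.e. feat1 is gated by a per-channel attention signal pooled from feat0.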
class Nasfpn(object):
"""Feature pyramid networks."""
def __init__(self,
min_level=3,
max_level=7,
block_specs=build_block_specs(),
fpn_feat_dims=256,
num_repeats=7,
use_separable_conv=False,
dropblock=nn_ops.Dropblock(),
block_fn='conv',
block_repeats=1,
activation='relu',
batch_norm_activation=nn_ops.BatchNormActivation(
activation='relu'),
init_drop_connect_rate=None,
data_format='channels_last',
use_sum_for_combination=False):
"""NAS-FPN initialization function.
Args:
min_level: `int` minimum level in NAS-FPN output feature maps.
max_level: `int` maximum level in NAS-FPN output feature maps.
      block_specs: a list of BlockSpec objects that specifies the NAS-FPN
network topology. By default, the previously discovered architecture is
used.
fpn_feat_dims: `int` number of filters in FPN layers.
num_repeats: number of repeats for feature pyramid network.
use_separable_conv: `bool`, if True use separable convolution for
convolution in NAS-FPN layers.
dropblock: a Dropblock layer.
      block_fn: `string` type of block group to use; supported values are
        'conv' and 'bottleneck'.
block_repeats: `int` representing the number of repeats per block group
when block group is bottleneck.
      activation: activation function. Supports 'relu' and 'swish'.
batch_norm_activation: an operation that includes a batch normalization
layer followed by an optional activation layer.
init_drop_connect_rate: a 'float' number that specifies the initial drop
connection rate. Note that the default `None` means no drop connection
is applied.
data_format: An optional string from: "channels_last", "channels_first".
Defaults to "channels_last".
use_sum_for_combination: `bool`, if True only 'sum' is used for combining
two nodes.
"""
self._min_level = min_level
self._max_level = max_level
self._block_specs = block_specs
self._fpn_feat_dims = fpn_feat_dims
self._num_repeats = num_repeats
self._block_fn = block_fn
self._block_repeats = block_repeats
if use_separable_conv:
self._conv2d_op = functools.partial(
tf.layers.separable_conv2d, depth_multiplier=1)
else:
self._conv2d_op = tf.layers.conv2d
self._dropblock = dropblock
if activation == 'relu':
self._activation = tf.nn.relu
elif activation == 'swish':
self._activation = tf.nn.swish
else:
raise ValueError('Activation {} not implemented.'.format(activation))
self._batch_norm_activation = batch_norm_activation
self._init_drop_connect_rate = init_drop_connect_rate
self._data_format = data_format
self._resample_feature_map = functools.partial(
resample_feature_map,
target_feat_dims=fpn_feat_dims,
conv2d_op=self._conv2d_op,
batch_norm_activation=batch_norm_activation,
data_format=self._data_format)
self._use_sum_for_combination = use_sum_for_combination
def __call__(self, multilevel_features, is_training=False):
"""Returns the FPN features for a given multilevel features.
Args:
multilevel_features: a `dict` containing `int` keys for continuous feature
levels, e.g., [2, 3, 4, 5]. The values are corresponding features with
shape [batch_size, height_l, width_l, num_filters].
is_training: `bool` if True, the model is in training mode.
Returns:
a `dict` containing `int` keys for continuous feature levels
[min_level, min_level + 1, ..., max_level]. The values are corresponding
FPN features with shape [batch_size, height_l, width_l, fpn_feat_dims].
"""
feats = []
for level in range(self._min_level, self._max_level + 1):
if level in list(multilevel_features.keys()):
        # TODO(tsungyi): The original impl. doesn't downsample the backbone feat.
feats.append(self._resample_feature_map(
multilevel_features[level], level, level, is_training,
name='l%d' % level))
else:
# Adds a coarser level by downsampling the last feature map.
feats.append(self._resample_feature_map(
feats[-1], level - 1, level, is_training,
name='p%d' % level))
with tf.variable_scope('fpn_cells'):
for i in range(self._num_repeats):
with tf.variable_scope('cell_{}'.format(i)):
logging.info('building cell %s', i)
feats_dict = self._build_feature_pyramid(feats, is_training)
feats = [feats_dict[level] for level in range(
self._min_level, self._max_level + 1)]
return feats_dict
def _build_feature_pyramid(self, feats, is_training):
"""Function to build a feature pyramid network."""
# Number of output connections from each feat.
num_output_connections = [0] * len(feats)
num_output_levels = self._max_level - self._min_level + 1
feat_levels = list(range(self._min_level, self._max_level + 1))
for i, sub_policy in enumerate(self._block_specs):
with tf.variable_scope('sub_policy{}'.format(i)):
logging.info('sub_policy %d : %s', i, sub_policy)
new_level = sub_policy.level
# Checks the range of input_offsets.
for input_offset in sub_policy.input_offsets:
if input_offset >= len(feats):
raise ValueError(
'input_offset ({}) is larger than num feats({})'.format(
input_offset, len(feats)))
input0 = sub_policy.input_offsets[0]
input1 = sub_policy.input_offsets[1]
# Update graph with inputs.
node0 = feats[input0]
node0_level = feat_levels[input0]
num_output_connections[input0] += 1
node0 = self._resample_feature_map(
node0, node0_level, new_level, is_training,
name='0_{}_{}'.format(input0, len(feats)))
node1 = feats[input1]
node1_level = feat_levels[input1]
num_output_connections[input1] += 1
node1 = self._resample_feature_map(
node1, node1_level, new_level, is_training,
name='1_{}_{}'.format(input1, len(feats)))
# Combine node0 and node1 to create new feat.
if self._use_sum_for_combination or sub_policy.combine_fn == 'sum':
new_node = node0 + node1
elif sub_policy.combine_fn == 'attention':
if node0_level >= node1_level:
new_node = global_attention(node0, node1)
else:
new_node = global_attention(node1, node0)
else:
raise ValueError('unknown combine_fn `{}`.'
.format(sub_policy.combine_fn))
# Add intermediate nodes that do not have any connections to output.
if sub_policy.is_output:
for j, (feat, feat_level, num_output) in enumerate(
zip(feats, feat_levels, num_output_connections)):
if num_output == 0 and feat_level == new_level:
num_output_connections[j] += 1
feat_ = self._resample_feature_map(
feat, feat_level, new_level, is_training,
name='fa_{}_{}'.format(i, j))
new_node += feat_
with tf.variable_scope('op_after_combine{}'.format(len(feats))):
new_node = self._activation(new_node)
new_node = block_group(
inputs=new_node,
filters=self._fpn_feat_dims,
strides=1,
block_fn=self._block_fn,
block_repeats=self._block_repeats,
conv2d_op=self._conv2d_op,
activation=self._activation,
batch_norm_activation=self._batch_norm_activation,
dropblock=self._dropblock,
drop_connect_rate=self._init_drop_connect_rate,
data_format=self._data_format,
name='block_{}'.format(i),
is_training=is_training)
feats.append(new_node)
feat_levels.append(new_level)
num_output_connections.append(0)
output_feats = {}
for i in range(len(feats) - num_output_levels, len(feats)):
level = feat_levels[i]
output_feats[level] = feats[i]
logging.info('Output feature pyramid: %s', output_feats)
return output_feats
|
|
from app.models import *
from utils.common import *
import app.cs_logger
from datetime import datetime, timedelta
from mock import Mock, patch
import importlib
import os
import pytest
import random
ENV = os.environ["CS_ENV"] = "test"
import utils.email_db_report
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
BASE_DIR = os.path.join(TEST_DIR, "../")
END_DT = datetime(2017,12,31)
DAYS = 7
get_count = lambda: random.choice(range(5,15))
PAGES_PER_DAY = get_count()
POSTS_PER_DAY = get_count()
COMMENTS_PER_POST = get_count()
USERS_PER_DAY = get_count()
MOD_ACTIONS_PER_DAY = get_count()
EXPERIMENTS_PER_DAY = get_count()
THINGS_PER_EXPERIMENT = get_count()
SNAPSHOTS_PER_THING = get_count()
ACTIONS_PER_EXPERIMENT = get_count()
SUBREDDIT_ID = "mouw"
SUBREDDIT_NAME = "science"
db_session = DbEngine(os.path.join(TEST_DIR, "../", "config") + "/{env}.json".format(env=ENV)).new_session()
log = app.cs_logger.get_logger(ENV, BASE_DIR)
experiment_test_info = {}
def get_experiment_test_info(day, num):
name = "%d_%d" % (day, num)
return experiment_test_info[name], name
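# Illustrative note (added commentary): init_experiments stores Experiment.id
# keyed by "<day>_<num>", so e.g. get_experiment_test_info(0, 1) returns
# (experiment_test_info["0_1"], "0_1").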
def clear_all_tables():
for table in reversed(Base.metadata.sorted_tables):
db_session.execute(table.delete())
db_session.commit()
def setup_function(function):
clear_all_tables()
def teardown_function(function):
clear_all_tables()
    experiment_test_info.clear()
@pytest.fixture
def init_front_pages():
assert len(db_session.query(FrontPage).all()) == 0
for day in range(DAYS):
for _ in range(PAGES_PER_DAY):
for page_type in PageType:
db_session.add(FrontPage(
created_at = END_DT - timedelta(days=day),
page_type = page_type.value,
is_utc = True))
db_session.commit()
@pytest.fixture
def init_subreddit_pages(init_subreddits):
assert len(db_session.query(Subreddit).all()) == 1
assert len(db_session.query(SubredditPage).all()) == 0
for day in range(DAYS):
for _ in range(PAGES_PER_DAY):
for page_type in PageType:
db_session.add(SubredditPage(
created_at = END_DT - timedelta(days=day),
page_type = page_type.value,
subreddit_id = SUBREDDIT_ID,
is_utc = True))
db_session.commit()
@pytest.fixture
def init_subreddits():
assert len(db_session.query(Subreddit).all()) == 0
db_session.add(Subreddit(
id = SUBREDDIT_ID,
name = SUBREDDIT_NAME,
created_at = END_DT - timedelta(days=DAYS)))
db_session.commit()
@pytest.fixture
def init_posts(init_subreddits):
assert len(db_session.query(Subreddit).all()) == 1
assert len(db_session.query(Post).all()) == 0
for day in range(DAYS):
for post in range(POSTS_PER_DAY):
db_session.add(Post(
id = "%s_%d_%d" % (SUBREDDIT_ID, day, post),
created_at = END_DT - timedelta(days=day),
subreddit_id = SUBREDDIT_ID))
db_session.commit()
@pytest.fixture
def init_comments(init_posts):
assert len(db_session.query(Subreddit).all()) == 1
assert len(db_session.query(Post).all()) == DAYS * POSTS_PER_DAY
assert len(db_session.query(Comment).all()) == 0
for day in range(DAYS):
for post in range(POSTS_PER_DAY):
post_id = "%s_%d_%d" % (SUBREDDIT_ID, day, post)
for comment in range(COMMENTS_PER_POST):
db_session.add(Comment(
id = "%s_%s" % (post_id, comment),
created_at = END_DT - timedelta(days=day),
subreddit_id = SUBREDDIT_ID,
post_id = post_id))
db_session.commit()
@pytest.fixture
def init_users():
assert len(db_session.query(User).all()) == 0
for day in range(DAYS):
for user in range(USERS_PER_DAY):
dt = END_DT - timedelta(days=day)
db_session.add(User(
name = "user_%d_%d" % (day, user),
created = dt,
first_seen = dt,
last_seen = dt))
db_session.commit()
@pytest.fixture
def init_mod_actions(init_subreddits):
assert len(db_session.query(Subreddit).all()) == 1
assert len(db_session.query(ModAction).all()) == 0
for day in range(DAYS):
for mod_action in range(MOD_ACTIONS_PER_DAY):
db_session.add(ModAction(
id = "%s_%d_%d" % (SUBREDDIT_ID, day, mod_action),
created_at = END_DT - timedelta(days=day),
subreddit_id = SUBREDDIT_ID))
db_session.commit()
@pytest.fixture
def init_experiments():
assert len(db_session.query(Experiment).all()) == 0
for day in range(DAYS):
for experiment in range(EXPERIMENTS_PER_DAY):
dt = END_DT - timedelta(days=day)
experiment = Experiment(
name = "%d_%d" % (day, experiment),
controller = "dummy_controller",
created_at = dt,
start_time = dt,
end_time = dt)
db_session.add(experiment)
db_session.flush()
experiment_test_info[experiment.name] = experiment.id
db_session.commit()
@pytest.fixture
def init_experiment_things(init_experiments):
assert len(db_session.query(Experiment).all()) == DAYS * EXPERIMENTS_PER_DAY
assert len(db_session.query(ExperimentThing).all()) == 0
for day in range(DAYS):
for experiment in range(EXPERIMENTS_PER_DAY):
experiment_id, experiment_name = get_experiment_test_info(day, experiment)
for thing in range(THINGS_PER_EXPERIMENT):
db_session.add(ExperimentThing(
id = "%s_%d" % (experiment_name, thing),
created_at = END_DT - timedelta(days=day),
object_type = ThingType.SUBMISSION.value,
experiment_id = experiment_id))
db_session.commit()
@pytest.fixture
def init_experiment_thing_snapshots(init_experiment_things):
thing_count = DAYS * EXPERIMENTS_PER_DAY * THINGS_PER_EXPERIMENT
assert len(db_session.query(Experiment).all()) == DAYS * EXPERIMENTS_PER_DAY
assert len(db_session.query(ExperimentThing).all()) == thing_count
assert len(db_session.query(ExperimentThingSnapshot).all()) == 0
for day in range(DAYS):
for experiment in range(EXPERIMENTS_PER_DAY):
experiment_id, experiment_name = get_experiment_test_info(day, experiment)
for thing in range(THINGS_PER_EXPERIMENT):
experiment_thing_id = "%s_%d" % (experiment_name, thing)
for snapshot in range(SNAPSHOTS_PER_THING):
db_session.add(ExperimentThingSnapshot(
experiment_thing_id = experiment_thing_id,
created_at = END_DT - timedelta(days=day),
object_type = ThingType.SUBMISSION.value,
experiment_id = experiment_id))
db_session.commit()
@pytest.fixture
def init_experiment_actions(init_experiments):
assert len(db_session.query(Experiment).all()) == DAYS * EXPERIMENTS_PER_DAY
assert len(db_session.query(ExperimentAction).all()) == 0
for day in range(DAYS):
for experiment in range(EXPERIMENTS_PER_DAY):
experiment_id, _ = get_experiment_test_info(day, experiment)
for action in range(ACTIONS_PER_EXPERIMENT):
db_session.add(ExperimentAction(
action = "test",
created_at = END_DT - timedelta(days=day),
experiment_id = experiment_id))
db_session.commit()
def test_generate_reddit_front_page(init_front_pages):
assert len(db_session.query(FrontPage).all()) == DAYS * PAGES_PER_DAY * len(PageType)
report = importlib.reload(utils.email_db_report)
output = set(report.generate_reddit_front_page(END_DT, DAYS, html=False))
assert len(output) == DAYS * len(PageType)
for day in range(DAYS):
dt = END_DT - timedelta(days=day)
for page_type in PageType:
record = (page_type.name, dt.year, dt.month, dt.day, PAGES_PER_DAY)
assert record in output
def test_generate_reddit_subreddit_page(init_subreddit_pages):
assert len(db_session.query(SubredditPage).all()) == DAYS * PAGES_PER_DAY * len(PageType)
report = importlib.reload(utils.email_db_report)
output = set(report.generate_reddit_subreddit_page(END_DT, DAYS, html=False))
assert len(output) == DAYS * len(PageType)
for day in range(DAYS):
dt = END_DT - timedelta(days=day)
for page_type in PageType:
label = "(%s, %s)" % (SUBREDDIT_NAME, page_type.name)
record = (label, dt.year, dt.month, dt.day, PAGES_PER_DAY)
assert record in output
def test_generate_reddit_subreddit(init_subreddits):
assert len(db_session.query(Subreddit).all()) == 1
report = importlib.reload(utils.email_db_report)
output = report.generate_reddit_subreddit(END_DT, DAYS, html=False)
assert len(output) == 1
dt = END_DT - timedelta(days=DAYS)
assert output[0][1:] == (dt.year, dt.month, dt.day, 1)
def test_generate_reddit_post(init_posts):
assert len(db_session.query(Post).all()) == DAYS * POSTS_PER_DAY
report = importlib.reload(utils.email_db_report)
output = {tuple(item) for item in report.generate_reddit_post(END_DT, DAYS, html=False)}
assert len(output) == DAYS
for day in range(DAYS):
dt = END_DT - timedelta(days=day)
record = (SUBREDDIT_NAME, dt.year, dt.month, dt.day, POSTS_PER_DAY)
assert record in output
def test_generate_reddit_comment(init_comments):
assert len(db_session.query(Comment).all()) == DAYS * POSTS_PER_DAY * COMMENTS_PER_POST
report = importlib.reload(utils.email_db_report)
output = {tuple(item) for item in report.generate_reddit_comment(END_DT, DAYS, html=False)}
assert len(output) == DAYS
for day in range(DAYS):
dt = END_DT - timedelta(days=day)
record = (SUBREDDIT_NAME, dt.year, dt.month, dt.day, COMMENTS_PER_POST * POSTS_PER_DAY)
assert record in output
def test_generate_reddit_user(init_users):
assert len(db_session.query(User).all()) == DAYS * USERS_PER_DAY
report = importlib.reload(utils.email_db_report)
output = {tuple(item)[1:] for item in report.generate_reddit_user(END_DT, DAYS, html=False)}
assert len(output) == DAYS
for day in range(DAYS):
dt = END_DT - timedelta(days=day)
record = (dt.year, dt.month, dt.day, USERS_PER_DAY)
assert record in output
def test_generate_reddit_mod_action(init_mod_actions):
assert len(db_session.query(ModAction).all()) == DAYS * MOD_ACTIONS_PER_DAY
report = importlib.reload(utils.email_db_report)
output = {tuple(item) for item in report.generate_reddit_mod_action(END_DT, DAYS, html=False)}
assert len(output) == DAYS
for day in range(DAYS):
dt = END_DT - timedelta(days=day)
record = (SUBREDDIT_NAME, dt.year, dt.month, dt.day, MOD_ACTIONS_PER_DAY)
assert record in output
def test_generate_experiment_new(init_experiments):
assert len(db_session.query(Experiment).all()) == DAYS * EXPERIMENTS_PER_DAY
report = importlib.reload(utils.email_db_report)
output = {tuple(item)[1:] for item in report.generate_experiment_new(END_DT, DAYS, html=False)}
assert len(output) == DAYS
for day in range(DAYS):
dt = END_DT - timedelta(days=day)
record = (dt.year, dt.month, dt.day, EXPERIMENTS_PER_DAY)
assert record in output
def test_generate_experiment_active(init_experiments):
assert len(db_session.query(Experiment).all()) == DAYS * EXPERIMENTS_PER_DAY
report = importlib.reload(utils.email_db_report)
output = list(report.generate_experiment_active(END_DT, DAYS, html=False)['total count'].values())
assert len(output) == DAYS
for day in range(DAYS):
assert output[day] == EXPERIMENTS_PER_DAY
def test_generate_experiment_thing(init_experiment_things):
count = DAYS * EXPERIMENTS_PER_DAY * THINGS_PER_EXPERIMENT
assert len(db_session.query(ExperimentThing).all()) == count
report = importlib.reload(utils.email_db_report)
output = set(report.generate_experiment_thing(END_DT, DAYS, html=False))
assert len(output) == DAYS * EXPERIMENTS_PER_DAY
for day in range(DAYS):
dt = END_DT - timedelta(days=day)
for experiment in range(EXPERIMENTS_PER_DAY):
experiment_id, _ = get_experiment_test_info(day, experiment)
label = "(%s, %s)" % (experiment_id, ThingType.SUBMISSION.name)
record = (label, dt.year, dt.month, dt.day, THINGS_PER_EXPERIMENT)
assert record in output
def test_generate_experiment_thing_snapshot(init_experiment_thing_snapshots):
count = DAYS * EXPERIMENTS_PER_DAY * THINGS_PER_EXPERIMENT * SNAPSHOTS_PER_THING
assert len(db_session.query(ExperimentThingSnapshot).all()) == count
report = importlib.reload(utils.email_db_report)
output = set(report.generate_experiment_thing_snapshot(END_DT, DAYS, html=False))
assert len(output) == DAYS * EXPERIMENTS_PER_DAY
for day in range(DAYS):
dt = END_DT - timedelta(days=day)
for experiment in range(EXPERIMENTS_PER_DAY):
experiment_id, _ = get_experiment_test_info(day, experiment)
label = "(%s, %s)" % (experiment_id, ThingType.SUBMISSION.name)
record = (label, dt.year, dt.month, dt.day, SNAPSHOTS_PER_THING * THINGS_PER_EXPERIMENT)
assert record in output
def test_generate_experiment_action(init_experiment_actions):
count = DAYS * EXPERIMENTS_PER_DAY * ACTIONS_PER_EXPERIMENT
assert len(db_session.query(ExperimentAction).all()) == count
report = importlib.reload(utils.email_db_report)
output = set(report.generate_experiment_action(END_DT, DAYS, html=False))
assert len(output) == DAYS * EXPERIMENTS_PER_DAY
for day in range(DAYS):
dt = END_DT - timedelta(days=day)
for experiment in range(EXPERIMENTS_PER_DAY):
experiment_id, _ = get_experiment_test_info(day, experiment)
label = "(%s, %s)" % (experiment_id, "test")
record = (label, dt.year, dt.month, dt.day, ACTIONS_PER_EXPERIMENT)
assert record in output
|
|
#!/usr/bin/env python
"""t is for people that want do things, not organize their tasks."""
from __future__ import with_statement
import os, re, sys, hashlib
from operator import itemgetter
from optparse import OptionParser, OptionGroup
class InvalidTaskfile(Exception):
"""Raised when the path to a task file already exists as a directory."""
pass
class AmbiguousPrefix(Exception):
"""Raised when trying to use a prefix that could identify multiple tasks."""
def __init__(self, prefix):
super(AmbiguousPrefix, self).__init__()
self.prefix = prefix
class UnknownPrefix(Exception):
"""Raised when trying to use a prefix that does not match any tasks."""
def __init__(self, prefix):
super(UnknownPrefix, self).__init__()
self.prefix = prefix
def _hash(text):
"""Return a hash of the given text for use as an id.
Currently SHA1 hashing is used. It should be plenty for our purposes.
"""
return hashlib.sha1(text).hexdigest()
def _task_from_taskline(taskline):
"""Parse a taskline (from a task file) and return a task.
A taskline should be in the format:
summary text ... | meta1:meta1_value,meta2:meta2_value,...
The task returned will be a dictionary such as:
{ 'id': <hash id>,
'text': <summary text>,
... other metadata ... }
A taskline can also consist of only summary text, in which case the id
and other metadata will be generated when the line is read. This is
supported to enable editing of the taskfile with a simple text editor.
"""
if taskline.strip().startswith('#'):
return None
elif '|' in taskline:
text, _, meta = taskline.rpartition('|')
task = { 'text': text.strip() }
for piece in meta.strip().split(','):
label, data = piece.split(':')
task[label.strip()] = data.strip()
else:
text = taskline.strip()
task = { 'id': _hash(text), 'text': text }
return task
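# Illustrative examples (added commentary, not executed):
#   _task_from_taskline('wash dishes | id:3f2a,priority:high')
#       -> {'text': 'wash dishes', 'id': '3f2a', 'priority': 'high'}
#   _task_from_taskline('wash dishes')
#       -> {'id': _hash('wash dishes'), 'text': 'wash dishes'}
#   _task_from_taskline('# a comment')
#       -> None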
def _tasklines_from_tasks(tasks):
"""Parse a list of tasks into tasklines suitable for writing."""
tasklines = []
for task in tasks:
meta = [m for m in task.items() if m[0] != 'text']
meta_str = ', '.join('%s:%s' % m for m in meta)
tasklines.append('%s | %s\n' % (task['text'], meta_str))
return tasklines
def _prefixes(ids):
"""Return a mapping of ids to prefixes in O(n) time.
Each prefix will be the shortest possible substring of the ID that
can uniquely identify it among the given group of IDs.
If an ID of one task is entirely a substring of another task's ID, the
entire ID will be the prefix.
"""
ps = {}
for id in ids:
id_len = len(id)
for i in range(1, id_len+1):
# identifies an empty prefix slot, or a singular collision
prefix = id[:i]
if (not prefix in ps) or (ps[prefix] and prefix != ps[prefix]):
break
if prefix in ps:
# if there is a collision
other_id = ps[prefix]
for j in range(i, id_len+1):
if other_id[:j] == id[:j]:
ps[id[:j]] = ''
else:
ps[other_id[:j]] = other_id
ps[id[:j]] = id
break
else:
ps[other_id[:id_len+1]] = other_id
ps[id] = id
else:
# no collision, can safely add
ps[prefix] = id
ps = dict(zip(ps.values(), ps.keys()))
if '' in ps:
del ps['']
return ps
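# Illustrative example (added commentary, not executed): two ids that first
# diverge at their third character get three-character prefixes:
#   _prefixes(['abcd', 'abef'])  ->  {'abcd': 'abc', 'abef': 'abe'}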
class TaskDict(object):
"""A set of tasks, both finished and unfinished, for a given list.
The list's files are read from disk when the TaskDict is initialized. They
can be written back out to disk with the write() function.
"""
def __init__(self, taskdir='.', name='tasks'):
"""Initialize by reading the task files, if they exist."""
self.tasks = {}
self.done = {}
self.name = name
self.taskdir = taskdir
filemap = (('tasks', self.name), ('done', '.%s.done' % self.name))
for kind, filename in filemap:
path = os.path.join(os.path.expanduser(self.taskdir), filename)
if os.path.isdir(path):
raise InvalidTaskfile
if os.path.exists(path):
with open(path, 'r') as tfile:
tls = [tl.strip() for tl in tfile if tl]
tasks = map(_task_from_taskline, tls)
for task in tasks:
if task is not None:
getattr(self, kind)[task['id']] = task
def __getitem__(self, prefix):
"""Return the unfinished task with the given prefix.
If more than one task matches the prefix an AmbiguousPrefix exception
will be raised, unless the prefix is the entire ID of one task.
If no tasks match the prefix an UnknownPrefix exception will be raised.
"""
matched = filter(lambda tid: tid.startswith(prefix), self.tasks.keys())
if len(matched) == 1:
return self.tasks[matched[0]]
elif len(matched) == 0:
raise UnknownPrefix(prefix)
else:
matched = filter(lambda tid: tid == prefix, self.tasks.keys())
if len(matched) == 1:
return self.tasks[matched[0]]
else:
raise AmbiguousPrefix(prefix)
def add_task(self, text):
"""Add a new, unfinished task with the given summary text."""
task_id = _hash(text)
self.tasks[task_id] = {'id': task_id, 'text': text}
def edit_task(self, prefix, text):
"""Edit the task with the given prefix.
If more than one task matches the prefix an AmbiguousPrefix exception
will be raised, unless the prefix is the entire ID of one task.
If no tasks match the prefix an UnknownPrefix exception will be raised.
"""
task = self[prefix]
if text.startswith('s/') or text.startswith('/'):
text = re.sub('^s?/', '', text).rstrip('/')
find, _, repl = text.partition('/')
text = re.sub(find, repl, task['text'])
task['text'] = text
task['id'] = _hash(text)
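    # Illustrative note (added commentary): edit_task also accepts a sed-style
    # substitution as TEXT; assuming a task 'buy milk' matched by prefix '3f',
    #   edit_task('3f', 's/milk/bread/')   # rewrites the text to 'buy bread'
    # while plain TEXT replaces the summary outright.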
def finish_task(self, prefix):
"""Mark the task with the given prefix as finished.
If more than one task matches the prefix an AmbiguousPrefix exception
will be raised, if no tasks match it an UnknownPrefix exception will
be raised.
"""
task = self.tasks.pop(self[prefix]['id'])
self.done[task['id']] = task
def remove_task(self, prefix):
"""Remove the task from tasks list.
If more than one task matches the prefix an AmbiguousPrefix exception
will be raised, if no tasks match it an UnknownPrefix exception will
be raised.
"""
self.tasks.pop(self[prefix]['id'])
def print_list(self, kind='tasks', verbose=False, quiet=False, grep=''):
"""Print out a nicely formatted list of unfinished tasks."""
tasks = dict(getattr(self, kind).items())
label = 'prefix' if not verbose else 'id'
if not verbose:
for task_id, prefix in _prefixes(tasks).items():
tasks[task_id]['prefix'] = prefix
plen = max(map(lambda t: len(t[label]), tasks.values())) if tasks else 0
for _, task in sorted(tasks.items()):
if grep.lower() in task['text'].lower():
p = '%s - ' % task[label].ljust(plen) if not quiet else ''
print p + task['text']
def write(self, delete_if_empty=False):
"""Flush the finished and unfinished tasks to the files on disk."""
filemap = (('tasks', self.name), ('done', '.%s.done' % self.name))
for kind, filename in filemap:
path = os.path.join(os.path.expanduser(self.taskdir), filename)
if os.path.isdir(path):
raise InvalidTaskfile
tasks = sorted(getattr(self, kind).values(), key=itemgetter('id'))
if tasks or not delete_if_empty:
with open(path, 'w') as tfile:
for taskline in _tasklines_from_tasks(tasks):
tfile.write(taskline)
elif not tasks and os.path.isfile(path):
os.remove(path)
def _build_parser():
"""Return a parser for the command-line interface."""
usage = "Usage: %prog [-t DIR] [-l LIST] [options] [TEXT]"
parser = OptionParser(usage=usage)
actions = OptionGroup(parser, "Actions",
"If no actions are specified the TEXT will be added as a new task.")
actions.add_option("-e", "--edit", dest="edit", default="",
help="edit TASK to contain TEXT", metavar="TASK")
actions.add_option("-f", "--finish", dest="finish",
help="mark TASK as finished", metavar="TASK")
actions.add_option("-r", "--remove", dest="remove",
help="Remove TASK from list", metavar="TASK")
parser.add_option_group(actions)
config = OptionGroup(parser, "Configuration Options")
config.add_option("-l", "--list", dest="name", default="tasks",
help="work on LIST", metavar="LIST")
config.add_option("-t", "--task-dir", dest="taskdir", default="",
help="work on the lists in DIR", metavar="DIR")
config.add_option("-d", "--delete-if-empty",
action="store_true", dest="delete", default=False,
help="delete the task file if it becomes empty")
parser.add_option_group(config)
output = OptionGroup(parser, "Output Options")
output.add_option("-g", "--grep", dest="grep", default='',
help="print only tasks that contain WORD", metavar="WORD")
output.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=False,
help="print more detailed output (full task ids, etc)")
output.add_option("-q", "--quiet",
action="store_true", dest="quiet", default=False,
help="print less detailed output (no task ids, etc)")
output.add_option("--done",
action="store_true", dest="done", default=False,
help="list done tasks instead of unfinished ones")
parser.add_option_group(output)
return parser
def _main():
"""Run the command-line interface."""
(options, args) = _build_parser().parse_args()
td = TaskDict(taskdir=options.taskdir, name=options.name)
text = ' '.join(args).strip()
try:
if options.finish:
td.finish_task(options.finish)
td.write(options.delete)
elif options.remove:
td.remove_task(options.remove)
td.write(options.delete)
elif options.edit:
td.edit_task(options.edit, text)
td.write(options.delete)
elif text:
td.add_task(text)
td.write(options.delete)
else:
kind = 'tasks' if not options.done else 'done'
td.print_list(kind=kind, verbose=options.verbose, quiet=options.quiet,
grep=options.grep)
except AmbiguousPrefix, e:
sys.stderr.write('The ID "%s" matches more than one task.\n' % e.prefix)
except UnknownPrefix, e:
sys.stderr.write('The ID "%s" does not match any task.\n' % e.prefix)
if __name__ == '__main__':
_main()
|
|
# -*- coding: utf-8 -*-
from gluon import current
from gluon.html import *
from gluon.storage import Storage
from s3.s3utils import S3CustomController
THEME = "Philippines"
# =============================================================================
class index(S3CustomController):
""" Custom Home Page """
def __call__(self):
response = current.response
output = {}
#output["title"] = response.title = current.deployment_settings.get_system_name()
s3 = response.s3
# Image Carousel
s3.jquery_ready.append('''$('#myCarousel').carousel()''')
# Latest 4 Requests
s3db = current.s3db
list_id = "latest_reqs"
layout = s3db.req_req_list_layout
limit = 4
resource = s3db.resource("req_req")
s3db.req_customise_req_fields()
list_fields = s3db.get_config("req_req", "list_fields")
from s3.s3query import FS
resource.add_filter(FS("cancel") != True)
# Order with most recent first
orderby = "date desc"
output["latest_reqs"] = latest_records(resource, layout, list_id, limit, list_fields, orderby)
# Latest 4 Offers
list_id = "latest_offers"
layout = s3db.req_commit_list_layout
#limit = 4
resource = s3db.resource("req_commit")
s3db.req_customise_commit_fields()
list_fields = s3db.get_config("req_commit", "list_fields")
resource.add_filter(FS("cancel") != True)
# Order with most recent first
#orderby = "date desc"
output["latest_offers"] = latest_records(resource, layout, list_id, limit, list_fields, orderby)
# What We Do
table = s3db.cms_post
ltable = s3db.cms_post_module
query = (ltable.module == "default") & \
(ltable.resource == "index") & \
(ltable.post_id == table.id) & \
(table.deleted != True)
item = current.db(query).select(table.id,
table.body,
limitby=(0, 1)).first()
if item:
what_we_do = DIV(XML(item.body))
if current.auth.s3_has_role("ADMIN"):
if s3.crud.formstyle == "bootstrap":
_class = "btn"
else:
_class = "action-btn"
what_we_do.append(A(current.T("Edit"),
_href=URL(c="cms", f="post",
args=[item.id, "update"],
vars={"module": "default",
"resource": "index",
}),
_class="%s cms-edit" % _class))
else:
what_we_do = DIV()
if current.auth.s3_has_role("ADMIN"):
if s3.crud.formstyle == "bootstrap":
_class = "btn"
else:
_class = "action-btn"
what_we_do.append(A(current.T("Edit"),
_href=URL(c="cms", f="post",
args=["create"],
vars={"module": "default",
"resource": "index",
}),
_class="%s cms-edit" % _class))
output["what_we_do"] = what_we_do
self._view(THEME, "index.html")
return output
# =============================================================================
def latest_records(resource, layout, list_id, limit, list_fields, orderby):
"""
Display a dataList of the latest records for a resource
"""
#orderby = resource.table[orderby]
datalist, numrows, ids = resource.datalist(fields=list_fields,
start=None,
limit=limit,
list_id=list_id,
orderby=orderby,
layout=layout)
if numrows == 0:
# Empty table or just no match?
from s3.s3crud import S3CRUD
table = resource.table
if "deleted" in table:
available_records = current.db(table.deleted != True)
else:
available_records = current.db(table._id > 0)
if available_records.select(table._id,
limitby=(0, 1)).first():
msg = DIV(S3CRUD.crud_string(resource.tablename,
"msg_no_match"),
_class="empty")
else:
msg = DIV(S3CRUD.crud_string(resource.tablename,
"msg_list_empty"),
_class="empty")
data = msg
else:
# Render the list
dl = datalist.html()
data = dl
return data
# =============================================================================
class subscriptions(S3CustomController):
""" Custom page to manage subscriptions """
# -------------------------------------------------------------------------
def __call__(self):
""" Main entry point, configuration """
T = current.T
# Must be logged in
auth = current.auth
if not auth.s3_logged_in():
auth.permission.fail()
# Available resources
resources = [dict(resource="req_req",
url="req/req/datalist",
label=T("Requests")),
#dict(resource="req_commit",
# url="req/commit/datalist",
# label=T("Donations")),
#dict(resource="org_facility",
# url="org/facility/datalist",
# label=T("Sites")),
]
# Filter widgets
# @note: subscription manager has no resource context, so
# must configure fixed options or lookup resources
# for filter widgets which need it.
from s3.s3filter import S3LocationFilter
filters = [S3LocationFilter("location_id",
label=T("Location(s)"),
# @ToDo: Deeper levels
levels=["L1"],
options=self._options("location_id"),
widget="multiselect",
cols=3,
resource="req_req",
_name="location-filter"),
#S3OptionsFilter("site_id$organisation_id",
#label=T("Filter by Organization"),
#represent=s3db.org_organisation_represent,
##represent="%(name)s",
#widget="multiselect",
#cols=3,
#resource="req_req",
#_name="organisation-filter"),
]
# Title and view
title = T("Notification Settings")
self._view(THEME, "subscriptions.html")
# Form
form = self._manage_subscriptions(resources, filters)
return dict(title=title, form=form)
# -------------------------------------------------------------------------
@staticmethod
def _options(fieldname):
"""
Lookup the full set of options for a Filter Widget
            - for Subscriptions we want the full set of options, not just those present in the current data
"""
db = current.db
if fieldname == "location_id":
table = current.s3db.gis_location
query = (table.deleted == False) & \
(table.level == "L1")
# IDs converted inside widget's _options() function
rows = db(query).select(table.id)
options = [row.id for row in rows]
return options
# -------------------------------------------------------------------------
def _manage_subscriptions(self, resources, filters):
"""
Custom form to manage subscriptions
@param resources: available resources config
@param filters: filter widgets
"""
from gluon.sqlhtml import SQLFORM
from gluon.validators import IS_IN_SET
from s3.s3widgets import S3GroupedOptionsWidget
# L10n
T = current.T
labels = Storage(
RESOURCES = T("Subscribe To"),
NOTIFY_ON = T("Notify On"),
FREQUENCY = T("Frequency"),
NOTIFY_BY = T("Notify By"),
MORE = T("More Options"),
LESS = T("Less Options"),
)
messages = Storage(
ERROR = T("Error: could not update notification settings"),
SUCCESS = T("Notification settings updated"),
)
        # Get current subscription settings, or form defaults if none exist
subscription = self._get_subscription()
# Formstyle bootstrap
formstyle = SQLFORM.formstyles.bootstrap
# Initialize form
form = FORM(_id="subscription-form",
hidden={"subscription-filters": ""})
# Resource selector
# options = []
# selected_resources = set()
# subscribed = subscription["resources"]
# for idx, rconfig in enumerate(resources):
# options.append((idx, rconfig["label"]))
# if subscribed:
# for s in subscribed:
# if s.resource == rconfig["resource"] and \
# s.url == rconfig["url"]:
# selected_resources.add(idx)
# dummy = Storage(name="resources", requires = IS_IN_SET(options))
# selector = S3GroupedOptionsWidget(cols=2)
# row = ("resource_selector__row",
# "%s:" % labels.RESOURCES,
# selector(dummy,
# list(selected_resources),
# _id="resource_selector"),
# "")
# fieldset = formstyle(form, [row])
# form.append(fieldset)
# Filters
from s3.s3filter import S3FilterForm
filter_form = S3FilterForm(filters, clear=False)
fieldset = FIELDSET(filter_form.fields(None,
subscription["get_vars"]),
_id="subscription-filter-form")
form.append(fieldset)
# Notification options
rows = []
stable = current.s3db.pr_subscription
selector = S3GroupedOptionsWidget(cols=1)
rows.append(("trigger_selector__row",
"%s:" % labels.NOTIFY_ON,
selector(stable.notify_on,
subscription["notify_on"],
_id="trigger_selector"),
""))
switch = S3GroupedOptionsWidget(cols=1, multiple=False, sort=False)
rows.append(("frequency_selector__row",
"%s:" % labels.FREQUENCY,
switch(stable.frequency,
subscription["frequency"],
_id="frequency_selector"),
""))
# Deactivated: method selector
#rows.append(("method_selector__row",
#"%s:" % labels.NOTIFY_BY,
#selector(stable.method,
#subscription["method"],
#_id="method_selector"),
#""))
fieldset = formstyle(form, rows)
fieldset.insert(0,
DIV(SPAN([I(_class="icon-reorder"), labels.MORE],
_class="toggle-text",
_style="display:none"),
SPAN([I(_class="icon-reorder"), labels.LESS],
_class="toggle-text"),
_id="notification-options",
_class="control-group"))
form.append(fieldset)
# Submit button
row = ("submit__row", "",
INPUT(_type="submit", _value="Update Settings"), "")
fieldset = formstyle(form, [row])
form.append(fieldset)
# Script (to extract filters on submit and toggle options visibility)
script = URL(c="static", f="scripts", args=["S3", "s3.subscriptions.js"])
response = current.response
response.s3.scripts.append(script)
# Accept form
if form.accepts(current.request.post_vars,
current.session,
formname="subscription",
keepvalues=True):
formvars = form.vars
listify = lambda x: None if not x else x if type(x) is list else [x]
# Fixed resource selection:
subscription["subscribe"] = [resources[0]]
# Alternatively, with resource selector:
#subscribe = listify(formvars.resources)
#if subscribe:
#subscription["subscribe"] = \
#[r for idx, r in enumerate(resources)
#if str(idx) in subscribe]
subscription["filters"] = form.request_vars \
.get("subscription-filters", None)
subscription["notify_on"] = listify(formvars.notify_on)
subscription["frequency"] = formvars.frequency
# Fixed method:
subscription["method"] = ["EMAIL"]
# Alternatively, with method selector:
#subscription["method"] = listify(formvars.method)
success = self._update_subscription(subscription)
if success:
response.confirmation = messages.SUCCESS
else:
response.error = messages.ERROR
return form
# -------------------------------------------------------------------------
def _get_subscription(self):
""" Get current subscription settings """
db = current.db
s3db = current.s3db
pe_id = current.auth.user.pe_id
stable = s3db.pr_subscription
ftable = s3db.pr_filter
query = (stable.pe_id == pe_id) & \
(stable.deleted != True)
left = ftable.on(ftable.id == stable.filter_id)
row = db(query).select(stable.id,
stable.notify_on,
stable.frequency,
#stable.method,
ftable.id,
ftable.query,
left=left,
limitby=(0, 1)).first()
output = {"pe_id": pe_id}
get_vars = {}
if row:
# Existing settings
s = getattr(row, "pr_subscription")
f = getattr(row, "pr_filter")
rtable = s3db.pr_subscription_resource
query = (rtable.subscription_id == s.id) & \
(rtable.deleted != True)
rows = db(query).select(rtable.id,
rtable.resource,
rtable.url,
rtable.last_check_time,
rtable.next_check_time)
if f.query:
import json
filters = json.loads(f.query)
for k, v in filters:
if v is None:
continue
if k in get_vars:
if type(get_vars[k]) is list:
get_vars[k].append(v)
else:
get_vars[k] = [get_vars[k], v]
else:
get_vars[k] = v
output.update({"id": s.id,
"filter_id": f.id,
"get_vars" : get_vars,
"resources": rows,
"notify_on": s.notify_on,
"frequency": s.frequency,
"method": ["EMAIL"] #s.method,
})
else:
# Form defaults
output.update({"id": None,
"filter_id": None,
"get_vars" : get_vars,
"resources": None,
"notify_on": stable.notify_on.default,
"frequency": stable.frequency.default,
"method": ["EMAIL"] #stable.method.default
})
return output
# -------------------------------------------------------------------------
def _update_subscription(self, subscription):
""" Update subscription settings """
db = current.db
s3db = current.s3db
pe_id = subscription["pe_id"]
# Save filters
filter_id = subscription["filter_id"]
filters = subscription.get("filters")
if filters:
ftable = s3db.pr_filter
if not filter_id:
success = ftable.insert(pe_id=pe_id, query=filters)
filter_id = success
else:
success = db(ftable.id == filter_id).update(query=filters)
if not success:
return None
# Save subscription settings
stable = s3db.pr_subscription
subscription_id = subscription["id"]
frequency = subscription["frequency"]
if not subscription_id:
success = stable.insert(pe_id=pe_id,
filter_id=filter_id,
notify_on=subscription["notify_on"],
frequency=frequency,
method=subscription["method"])
subscription_id = success
else:
success = db(stable.id == subscription_id).update(
pe_id=pe_id,
filter_id=filter_id,
notify_on=subscription["notify_on"],
frequency=frequency,
method=subscription["method"])
if not success:
return None
# Save subscriptions
rtable = s3db.pr_subscription_resource
subscribe = subscription.get("subscribe")
if subscribe:
from datetime import datetime, timedelta
now = datetime.utcnow()
resources = subscription["resources"]
subscribed = {}
timestamps = {}
if resources:
for r in resources:
subscribed[(r.resource, r.url)] = r.id
timestamps[r.id] = (r.last_check_time,
r.next_check_time)
intervals = s3db.pr_subscription_check_intervals
interval = timedelta(minutes=intervals.get(frequency, 0))
keep = set()
fk = '''{"subscription_id": %s}''' % subscription_id
for new in subscribe:
resource, url = new["resource"], new["url"]
if (resource, url) not in subscribed:
# Restore subscription if previously unsubscribed, else
# insert new record
unsubscribed = {"deleted": True,
"deleted_fk": fk,
"resource": resource,
"url": url}
rtable.update_or_insert(_key=unsubscribed,
deleted=False,
deleted_fk=None,
subscription_id=subscription_id,
resource=resource,
url=url,
last_check_time=now,
next_check_time=None)
else:
# Keep it
record_id = subscribed[(resource, url)]
last_check_time, next_check_time = timestamps[record_id]
data = {}
if not last_check_time:
# Someone has tampered with the timestamps, so
# we need to reset them and start over
last_check_time = now
data["last_check_time"] = last_check_time
due = last_check_time + interval
if next_check_time != due:
# Time interval has changed
data["next_check_time"] = due
if data:
db(rtable.id == record_id).update(**data)
keep.add(record_id)
# Unsubscribe all others
unsubscribe = set(subscribed.values()) - keep
db(rtable.id.belongs(unsubscribe)).update(deleted=True,
deleted_fk=fk,
subscription_id=None)
# Update subscription
subscription["id"] = subscription_id
subscription["filter_id"] = filter_id
return subscription
# END =========================================================================
|
|
from distutils.version import LooseVersion
from collections import defaultdict
import numpy as np
try:
from matplotlib import colors
import matplotlib.cm as cm
except ImportError:
cm, colors = None, None
import bokeh
bokeh_version = LooseVersion(bokeh.__version__)
from bokeh.core.enums import Palette
from bokeh.document import Document
from bokeh.models.plots import Plot
from bokeh.models import GlyphRenderer
from bokeh.models.widgets import DataTable, Tabs
from bokeh.plotting import Figure
if bokeh_version >= '0.12':
from bokeh.layouts import WidgetBox
from ...core.options import abbreviated_exception
# Conversion between matplotlib and bokeh markers
markers = {'s': {'marker': 'square'},
'd': {'marker': 'diamond'},
'^': {'marker': 'triangle', 'orientation': 0},
'>': {'marker': 'triangle', 'orientation': np.pi/2},
'v': {'marker': 'triangle', 'orientation': np.pi},
'<': {'marker': 'triangle', 'orientation': -np.pi/2},
'1': {'marker': 'triangle', 'orientation': 0},
'2': {'marker': 'triangle', 'orientation': np.pi/2},
'3': {'marker': 'triangle', 'orientation': np.pi},
'4': {'marker': 'triangle', 'orientation': -np.pi/2}}
# List of models that do not update correctly and must be ignored
# Should only include models that have no direct effect on the display
# and can therefore be safely ignored. Axes currently fail saying
# LinearAxis.computed_bounds cannot be updated
IGNORED_MODELS = ['LinearAxis', 'LogAxis']
# Where to look for the ignored models
LOCATIONS = ['new', 'below']
# Model priority order to ensure some types are updated before others
MODEL_PRIORITY = ['Range1d', 'Title', 'Image', 'LinearColorMapper',
'Plot', 'Range1d', 'LinearAxis', 'ColumnDataSource']
def rgb2hex(rgb):
"""
Convert RGB(A) tuple to hex.
"""
if len(rgb) > 3:
rgb = rgb[:-1]
return "#{0:02x}{1:02x}{2:02x}".format(*(int(v*255) for v in rgb))
def mplcmap_to_palette(cmap):
"""
    Converts a matplotlib colormap to a palette of RGB hex strings.
"""
if colors is None:
raise ValueError("Using cmaps on objects requires matplotlib.")
with abbreviated_exception():
colormap = cm.get_cmap(cmap) #choose any matplotlib colormap here
return [rgb2hex(m) for m in colormap(np.arange(colormap.N))]
def get_cmap(cmap):
"""
Returns matplotlib cmap generated from bokeh palette or
directly accessed from matplotlib.
"""
with abbreviated_exception():
rgb_vals = getattr(Palette, cmap, None)
if rgb_vals:
return colors.ListedColormap(rgb_vals, name=cmap)
return cm.get_cmap(cmap)
def mpl_to_bokeh(properties):
"""
Utility to process style properties converting any
matplotlib specific options to their nearest bokeh
equivalent.
"""
new_properties = {}
for k, v in properties.items():
if k == 's':
new_properties['size'] = v
elif k == 'marker':
new_properties.update(markers.get(v, {'marker': v}))
elif k == 'color' or k.endswith('_color'):
with abbreviated_exception():
v = colors.ColorConverter.colors.get(v, v)
if isinstance(v, tuple):
with abbreviated_exception():
v = rgb2hex(v)
new_properties[k] = v
else:
new_properties[k] = v
new_properties.pop('cmap', None)
return new_properties
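# Illustrative example (added commentary; assumes matplotlib is importable so
# the color shorthand 'r' resolves): matplotlib-style options are mapped to
# their nearest bokeh equivalents, roughly:
#   mpl_to_bokeh({'s': 10, 'marker': '^', 'color': 'r'})
#     -> {'size': 10, 'marker': 'triangle', 'orientation': 0, 'color': '#ff0000'}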
def layout_padding(plots):
"""
Temporary workaround to allow empty plots in a
row of a bokeh GridPlot type. Should be removed
when https://github.com/bokeh/bokeh/issues/2891
is resolved.
"""
widths, heights = defaultdict(int), defaultdict(int)
for r, row in enumerate(plots):
for c, p in enumerate(row):
if p is not None:
width = p.plot_width if isinstance(p, Plot) else p.width
height = p.plot_height if isinstance(p, Plot) else p.height
widths[c] = max(widths[c], width)
heights[r] = max(heights[r], height)
expanded_plots = []
for r, row in enumerate(plots):
expanded_plots.append([])
for c, p in enumerate(row):
if p is None:
p = Figure(plot_width=widths[c],
plot_height=heights[r])
p.text(x=0, y=0, text=[' '])
p.xaxis.visible = False
p.yaxis.visible = False
p.outline_line_color = None
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
expanded_plots[r].append(p)
return expanded_plots
def convert_datetime(time):
return time.astype('datetime64[s]').astype(float)*1000
def models_to_json(models):
"""
Convert list of bokeh models into json to update plot(s).
"""
json_data, ids = [], []
for plotobj in models:
if plotobj.ref['id'] in ids:
continue
else:
ids.append(plotobj.ref['id'])
json = plotobj.to_json(False)
json.pop('tool_events', None)
json.pop('renderers', None)
json_data.append({'id': plotobj.ref['id'],
'type': plotobj.ref['type'],
'data': json})
return json_data
def refs(json):
"""
Finds all the references to other objects in the json
representation of a bokeh Document.
"""
result = {}
for obj in json['roots']['references']:
result[obj['id']] = obj
return result
def compute_static_patch(document, models, json=None):
"""
Computes a patch to update an existing document without
diffing the json first, making it suitable for static updates
between arbitrary frames. Note that this only supports changed
attributes and will break if new models have been added since
the plot was first created.
"""
references = refs(json if json else document.to_json())
requested_updates = [m.ref['id'] for m in models]
value_refs = {}
events = []
update_types = defaultdict(list)
for ref_id, obj in references.items():
if ref_id not in requested_updates:
continue
if obj['type'] in MODEL_PRIORITY:
priority = MODEL_PRIORITY.index(obj['type'])
else:
priority = float('inf')
for key, val in obj['attributes'].items():
event = Document._event_for_attribute_change(references,
obj, key, val,
value_refs)
events.append((priority, event))
update_types[obj['type']].append(key)
events = [delete_refs(e, LOCATIONS, IGNORED_MODELS)
for _, e in sorted(events, key=lambda x: x[0])]
value_refs = {ref_id: val for ref_id, val in value_refs.items()}
value_refs = delete_refs(value_refs, LOCATIONS, IGNORED_MODELS)
return dict(events=events, references=list(value_refs.values()))
def delete_refs(obj, locs, delete):
"""
Delete all references to specific model types by recursively
traversing the object and looking for the models to be deleted in
the supplied locations.
Note: Can be deleted once bokeh stops raising errors when updating
LinearAxis.computed_bounds
"""
if isinstance(obj, dict):
if 'type' in obj and obj['type'] in delete:
return None
for k, v in obj.items():
if k in locs:
ref = delete_refs(v, locs, delete)
if ref:
obj[k] = ref
else:
del obj[k]
else:
obj[k] = v
return obj
elif isinstance(obj, list):
objs = [delete_refs(v, locs, delete) for v in obj]
return [o for o in objs if o is not None]
else:
return obj
def hsv_to_rgb(hsv):
"""
Vectorized HSV to RGB conversion, adapted from:
http://stackoverflow.com/questions/24852345/hsv-to-rgb-color-conversion
"""
h, s, v = (hsv[..., i] for i in range(3))
shape = h.shape
i = np.int_(h*6.)
f = h*6.-i
q = f
t = 1.-f
i = np.ravel(i)
f = np.ravel(f)
i%=6
t = np.ravel(t)
q = np.ravel(q)
s = np.ravel(s)
v = np.ravel(v)
clist = (1-s*np.vstack([np.zeros_like(f),np.ones_like(f),q,t]))*v
#0:v 1:p 2:q 3:t
order = np.array([[0,3,1],[2,0,1],[1,0,3],[1,2,0],[3,1,0],[0,1,2]])
rgb = clist[order[i], np.arange(np.prod(shape))[:,None]]
return rgb.reshape(shape+(3,))
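# Illustrative example (added commentary, not executed): pure red round-trips,
#   hsv_to_rgb(np.array([[0.0, 1.0, 1.0]]))  ->  array([[1., 0., 0.]])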
def update_plot(old, new):
"""
Updates an existing plot or figure with a new plot,
useful for bokeh charts and mpl conversions, which do
not allow updating an existing plot easily.
ALERT: Should be replaced once bokeh supports it directly
"""
old_renderers = old.select(type=GlyphRenderer)
new_renderers = new.select(type=GlyphRenderer)
old.x_range.update(**new.x_range.properties_with_values())
old.y_range.update(**new.y_range.properties_with_values())
updated = []
for new_r in new_renderers:
for old_r in old_renderers:
if type(old_r.glyph) == type(new_r.glyph):
old_renderers.pop(old_renderers.index(old_r))
new_props = new_r.properties_with_values()
source = new_props.pop('data_source')
old_r.glyph.update(**new_r.glyph.properties_with_values())
old_r.update(**new_props)
old_r.data_source.data.update(source.data)
updated.append(old_r)
break
for old_r in old_renderers:
if old_r not in updated:
emptied = {k: [] for k in old_r.data_source.data}
old_r.data_source.data.update(emptied)
def pad_plots(plots, padding=0.85):
"""
    Accepts a grid of bokeh plots in the form of a list of lists and
wraps any DataTable or Tabs in a WidgetBox with appropriate
padding. Required to avoid overlap in gridplot.
"""
widths = []
for row in plots:
row_widths = []
for p in row:
if isinstance(p, Tabs):
                width = np.max([t.child.width if isinstance(t.child, DataTable)
                                else t.child.plot_width for t in p.tabs])
                for tab in p.tabs:
                    tab.width = int(padding*width)
elif isinstance(p, DataTable):
width = p.width
p.width = int(padding*width)
elif p:
width = p.plot_width
else:
width = 0
row_widths.append(width)
widths.append(row_widths)
plots = [[WidgetBox(p, width=w) if isinstance(p, (DataTable, Tabs)) else p
for p, w in zip(row, ws)] for row, ws in zip(plots, widths)]
total_width = np.max([np.sum(row) for row in widths])
return plots, total_width
|
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test the C{I...Endpoint} implementations that wrap the L{IReactorTCP},
L{IReactorSSL}, and L{IReactorUNIX} interfaces found in
L{twisted.internet.endpoints}.
"""
from socket import AF_INET, AF_INET6
from errno import EPERM
from zope.interface.verify import verifyObject
from twisted.trial import unittest
from twisted.internet import error, interfaces
from twisted.internet import endpoints
from twisted.internet.address import IPv4Address, UNIXAddress
from twisted.test.proto_helpers import MemoryReactor
from twisted.python.systemd import ListenFDs
from twisted.plugin import getPlugins
from twisted import plugins
from twisted.python.modules import getModule
from twisted.python.filepath import FilePath
from twisted.protocols import basic
from twisted.internet import protocol, reactor, stdio
from twisted.internet.stdio import PipeAddress
from twisted.internet.test.test_endpointspy3 import (
EndpointTestCaseMixin, ServerEndpointTestCaseMixin, skipSSL,
pemPath)
casPath = getModule(__name__).filePath.sibling("fake_CAs")
escapedPEMPathName = endpoints.quoteStringArgument(pemPath.path)
escapedCAsPathName = endpoints.quoteStringArgument(casPath.path)
if not skipSSL:
from OpenSSL.SSL import ContextType
from twisted.internet.ssl import CertificateOptions, Certificate, \
KeyPair, PrivateCertificate
from twisted.internet.test.test_endpointspy3 import (testCertificate,
testPrivateCertificate)
class StdioFactory(protocol.Factory):
protocol = basic.LineReceiver
class StandardIOEndpointsTestCase(unittest.TestCase):
"""
Tests for Standard I/O Endpoints
"""
def setUp(self):
self.ep = endpoints.StandardIOEndpoint(reactor)
def test_standardIOInstance(self):
"""
The endpoint creates an L{endpoints.StandardIO} instance.
"""
self.d = self.ep.listen(StdioFactory())
def checkInstanceAndLoseConnection(stdioOb):
self.assertIsInstance(stdioOb, stdio.StandardIO)
stdioOb.loseConnection()
self.d.addCallback(checkInstanceAndLoseConnection)
return self.d
def test_reactor(self):
"""
The reactor passed to the endpoint is set as its _reactor attribute.
"""
self.assertEqual(self.ep._reactor, reactor)
def test_protocol(self):
"""
The protocol used in the endpoint is a L{basic.LineReceiver} instance.
"""
self.d = self.ep.listen(StdioFactory())
def checkProtocol(stdioOb):
from twisted.python.runtime import platform
if platform.isWindows():
self.assertIsInstance(stdioOb.proto, basic.LineReceiver)
else:
self.assertIsInstance(stdioOb.protocol, basic.LineReceiver)
stdioOb.loseConnection()
self.d.addCallback(checkProtocol)
return self.d
def test_address(self):
"""
The address passed to the factory's buildProtocol in the endpoint
should be a PipeAddress instance.
"""
class TestAddrFactory(protocol.Factory):
protocol = basic.LineReceiver
_address = None
def buildProtocol(self, addr):
self._address = addr
p = self.protocol()
p.factory = self
return p
def getAddress(self):
return self._address
myFactory = TestAddrFactory()
self.d = self.ep.listen(myFactory)
def checkAddress(stdioOb):
self.assertIsInstance(myFactory.getAddress(), PipeAddress)
stdioOb.loseConnection()
self.d.addCallback(checkAddress)
return self.d
class UNIXEndpointsTestCase(EndpointTestCaseMixin,
unittest.TestCase):
"""
Tests for UnixSocket Endpoints.
"""
def retrieveConnectedFactory(self, reactor):
"""
Override L{EndpointTestCaseMixin.retrieveConnectedFactory} to account
for different index of 'factory' in C{connectUNIX} args.
"""
return self.expectedClients(reactor)[0][1]
def expectedServers(self, reactor):
"""
@return: List of calls to L{IReactorUNIX.listenUNIX}
"""
return reactor.unixServers
def expectedClients(self, reactor):
"""
@return: List of calls to L{IReactorUNIX.connectUNIX}
"""
return reactor.unixClients
def assertConnectArgs(self, receivedArgs, expectedArgs):
"""
Compare path, timeout, checkPID in C{receivedArgs} to C{expectedArgs}.
We ignore the factory because we don't care what protocol comes
out of the C{IStreamClientEndpoint.connect} call.
@param receivedArgs: C{tuple} of (C{path}, C{timeout}, C{checkPID})
that was passed to L{IReactorUNIX.connectUNIX}.
@param expectedArgs: C{tuple} of (C{path}, C{timeout}, C{checkPID})
that we expect to have been passed to L{IReactorUNIX.connectUNIX}.
"""
(path, ignoredFactory, timeout, checkPID) = receivedArgs
(expectedPath, _ignoredFactory, expectedTimeout,
expectedCheckPID) = expectedArgs
self.assertEqual(path, expectedPath)
self.assertEqual(timeout, expectedTimeout)
self.assertEqual(checkPID, expectedCheckPID)
def connectArgs(self):
"""
@return: C{dict} of keyword arguments to pass to connect.
"""
return {'timeout': 10, 'checkPID': 1}
def listenArgs(self):
"""
@return: C{dict} of keyword arguments to pass to listen
"""
return {'backlog': 100, 'mode': 0600, 'wantPID': 1}
def createServerEndpoint(self, reactor, factory, **listenArgs):
"""
Create an L{UNIXServerEndpoint} and return the tools to verify its
behaviour.
@param reactor: A fake L{IReactorUNIX} that L{UNIXServerEndpoint} can
call L{IReactorUNIX.listenUNIX} on.
@param factory: The thing that we expect to be passed to our
L{IStreamServerEndpoint.listen} implementation.
@param listenArgs: Optional dictionary of arguments to
L{IReactorUNIX.listenUNIX}.
"""
address = UNIXAddress(self.mktemp())
return (endpoints.UNIXServerEndpoint(reactor, address.name,
**listenArgs),
(address.name, factory,
listenArgs.get('backlog', 50),
listenArgs.get('mode', 0666),
listenArgs.get('wantPID', 0)),
address)
def createClientEndpoint(self, reactor, clientFactory, **connectArgs):
"""
Create an L{UNIXClientEndpoint} and return the values needed to verify
its behaviour.
@param reactor: A fake L{IReactorUNIX} that L{UNIXClientEndpoint} can
call L{IReactorUNIX.connectUNIX} on.
@param clientFactory: The thing that we expect to be passed to our
L{IStreamClientEndpoint.connect} implementation.
@param connectArgs: Optional dictionary of arguments to
L{IReactorUNIX.connectUNIX}
"""
address = UNIXAddress(self.mktemp())
return (endpoints.UNIXClientEndpoint(reactor, address.name,
**connectArgs),
(address.name, clientFactory,
connectArgs.get('timeout', 30),
connectArgs.get('checkPID', 0)),
address)
class ParserTestCase(unittest.TestCase):
"""
Tests for L{endpoints._parseServer}, the low-level parsing logic.
"""
f = "Factory"
def parse(self, *a, **kw):
"""
Provide a hook for test_strports to substitute the deprecated API.
"""
return endpoints._parseServer(*a, **kw)
def test_simpleTCP(self):
"""
Simple strings with a 'tcp:' prefix should be parsed as TCP.
"""
self.assertEqual(self.parse('tcp:80', self.f),
('TCP', (80, self.f), {'interface':'', 'backlog':50}))
def test_interfaceTCP(self):
"""
TCP port descriptions parse their 'interface' argument as a string.
"""
self.assertEqual(
self.parse('tcp:80:interface=127.0.0.1', self.f),
('TCP', (80, self.f), {'interface':'127.0.0.1', 'backlog':50}))
def test_backlogTCP(self):
"""
TCP port descriptions parse their 'backlog' argument as an integer.
"""
self.assertEqual(self.parse('tcp:80:backlog=6', self.f),
('TCP', (80, self.f),
{'interface':'', 'backlog':6}))
def test_simpleUNIX(self):
"""
L{endpoints._parseServer} returns a C{'UNIX'} port description with
defaults for C{'mode'}, C{'backlog'}, and C{'wantPID'} when passed a
string with the C{'unix:'} prefix and no other parameter values.
"""
self.assertEqual(
self.parse('unix:/var/run/finger', self.f),
('UNIX', ('/var/run/finger', self.f),
{'mode': 0666, 'backlog': 50, 'wantPID': True}))
def test_modeUNIX(self):
"""
C{mode} can be set by including C{"mode=<some integer>"}.
"""
self.assertEqual(
self.parse('unix:/var/run/finger:mode=0660', self.f),
('UNIX', ('/var/run/finger', self.f),
{'mode': 0660, 'backlog': 50, 'wantPID': True}))
def test_wantPIDUNIX(self):
"""
C{wantPID} can be set to false by including C{"lockfile=0"}.
"""
self.assertEqual(
self.parse('unix:/var/run/finger:lockfile=0', self.f),
('UNIX', ('/var/run/finger', self.f),
{'mode': 0666, 'backlog': 50, 'wantPID': False}))
def test_escape(self):
"""
Backslash can be used to escape colons and backslashes in port
descriptions.
"""
self.assertEqual(
self.parse(r'unix:foo\:bar\=baz\:qux\\', self.f),
('UNIX', ('foo:bar=baz:qux\\', self.f),
{'mode': 0666, 'backlog': 50, 'wantPID': True}))
def test_quoteStringArgument(self):
"""
L{endpoints.quoteStringArgument} should quote backslashes and colons
for interpolation into L{endpoints.serverFromString} and
L{endpoints.clientFactory} arguments.
"""
self.assertEqual(endpoints.quoteStringArgument("some : stuff \\"),
"some \\: stuff \\\\")
def test_impliedEscape(self):
"""
In strports descriptions, '=' in a parameter value does not need to be
quoted; it will simply be parsed as part of the value.
"""
self.assertEqual(
self.parse(r'unix:address=foo=bar', self.f),
('UNIX', ('foo=bar', self.f),
{'mode': 0666, 'backlog': 50, 'wantPID': True}))
def test_nonstandardDefault(self):
"""
For compatibility with the old L{twisted.application.strports.parse},
the third 'mode' argument may be specified to L{endpoints.parse} to
indicate a default other than TCP.
"""
self.assertEqual(
self.parse('filename', self.f, 'unix'),
('UNIX', ('filename', self.f),
{'mode': 0666, 'backlog': 50, 'wantPID': True}))
def test_unknownType(self):
"""
L{strports.parse} raises C{ValueError} when given an unknown endpoint
type.
"""
self.assertRaises(ValueError, self.parse, "bogus-type:nothing", self.f)
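# Illustrative sketch (not exercised by the test suite above): building a UNIX
# strports description for a socket path that itself contains colons, using
# endpoints.quoteStringArgument as covered by test_quoteStringArgument.  The
# path below is hypothetical.
def _exampleEscapedUNIXDescription():
    path = "/tmp/odd:name.sock"
    # quoteStringArgument escapes ':' and '\' so the parser does not treat
    # them as argument separators.
    return "unix:" + endpoints.quoteStringArgument(path)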
class ServerStringTests(unittest.TestCase):
"""
Tests for L{twisted.internet.endpoints.serverFromString}.
"""
def test_tcp(self):
"""
When passed a TCP strports description, L{endpoints.serverFromString}
returns a L{TCP4ServerEndpoint} instance initialized with the values
from the string.
"""
reactor = object()
server = endpoints.serverFromString(
reactor, "tcp:1234:backlog=12:interface=10.0.0.1")
self.assertIsInstance(server, endpoints.TCP4ServerEndpoint)
self.assertIdentical(server._reactor, reactor)
self.assertEqual(server._port, 1234)
self.assertEqual(server._backlog, 12)
self.assertEqual(server._interface, "10.0.0.1")
def test_ssl(self):
"""
When passed an SSL strports description, L{endpoints.serverFromString}
returns a L{SSL4ServerEndpoint} instance initialized with the values
from the string.
"""
reactor = object()
server = endpoints.serverFromString(
reactor,
"ssl:1234:backlog=12:privateKey=%s:"
"certKey=%s:interface=10.0.0.1" % (escapedPEMPathName,
escapedPEMPathName))
self.assertIsInstance(server, endpoints.SSL4ServerEndpoint)
self.assertIdentical(server._reactor, reactor)
self.assertEqual(server._port, 1234)
self.assertEqual(server._backlog, 12)
self.assertEqual(server._interface, "10.0.0.1")
ctx = server._sslContextFactory.getContext()
self.assertIsInstance(ctx, ContextType)
if skipSSL:
test_ssl.skip = skipSSL
def test_unix(self):
"""
When passed a UNIX strports description, L{endpoint.serverFromString}
returns a L{UNIXServerEndpoint} instance initialized with the values
from the string.
"""
reactor = object()
endpoint = endpoints.serverFromString(
reactor,
"unix:/var/foo/bar:backlog=7:mode=0123:lockfile=1")
self.assertIsInstance(endpoint, endpoints.UNIXServerEndpoint)
self.assertIdentical(endpoint._reactor, reactor)
self.assertEqual(endpoint._address, "/var/foo/bar")
self.assertEqual(endpoint._backlog, 7)
self.assertEqual(endpoint._mode, 0123)
self.assertEqual(endpoint._wantPID, True)
def test_implicitDefaultNotAllowed(self):
"""
The older service-based API (L{twisted.internet.strports.service})
allowed an implicit default of 'tcp' so that TCP ports could be
specified as a simple integer, but we've since decided that's a bad
idea, and the new API does not accept an implicit default argument; you
have to say 'tcp:' now. If you try passing an old implicit port number
to the new API, you'll get a C{ValueError}.
"""
value = self.assertRaises(
ValueError, endpoints.serverFromString, None, "4321")
self.assertEqual(
str(value),
"Unqualified strport description passed to 'service'."
"Use qualified endpoint descriptions; for example, 'tcp:4321'.")
def test_unknownType(self):
"""
L{endpoints.serverFromString} raises C{ValueError} when given an
unknown endpoint type.
"""
value = self.assertRaises(
# faster-than-light communication not supported
ValueError, endpoints.serverFromString, None,
"ftl:andromeda/carcosa/hali/2387")
self.assertEqual(
str(value),
"Unknown endpoint type: 'ftl'")
def test_typeFromPlugin(self):
"""
L{endpoints.serverFromString} looks up plugins of type
L{IStreamServerEndpoint} and constructs endpoints from them.
"""
# Set up a plugin which will only be accessible for the duration of
# this test.
addFakePlugin(self)
# Plugin is set up: now actually test.
notAReactor = object()
fakeEndpoint = endpoints.serverFromString(
notAReactor, "fake:hello:world:yes=no:up=down")
from twisted.plugins.fakeendpoint import fake
self.assertIdentical(fakeEndpoint.parser, fake)
self.assertEqual(fakeEndpoint.args, (notAReactor, 'hello', 'world'))
self.assertEqual(fakeEndpoint.kwargs, dict(yes='no', up='down'))
def addFakePlugin(testCase, dropinSource="fakeendpoint.py"):
"""
For the duration of C{testCase}, add a fake plugin to twisted.plugins which
contains some sample endpoint parsers.
"""
import sys
savedModules = sys.modules.copy()
savedPluginPath = plugins.__path__
def cleanup():
sys.modules.clear()
sys.modules.update(savedModules)
plugins.__path__[:] = savedPluginPath
testCase.addCleanup(cleanup)
fp = FilePath(testCase.mktemp())
fp.createDirectory()
getModule(__name__).filePath.sibling(dropinSource).copyTo(
fp.child(dropinSource))
plugins.__path__.append(fp.path)
class ClientStringTests(unittest.TestCase):
"""
Tests for L{twisted.internet.endpoints.clientFromString}.
"""
def test_tcp(self):
"""
When passed a TCP strports description, L{endpoints.clientFromString}
returns a L{TCP4ClientEndpoint} instance initialized with the values
from the string.
"""
reactor = object()
client = endpoints.clientFromString(
reactor,
"tcp:host=example.com:port=1234:timeout=7:bindAddress=10.0.0.2")
self.assertIsInstance(client, endpoints.TCP4ClientEndpoint)
self.assertIdentical(client._reactor, reactor)
self.assertEqual(client._host, "example.com")
self.assertEqual(client._port, 1234)
self.assertEqual(client._timeout, 7)
self.assertEqual(client._bindAddress, "10.0.0.2")
def test_tcpPositionalArgs(self):
"""
When passed a TCP strports description using positional arguments,
L{endpoints.clientFromString} returns a L{TCP4ClientEndpoint} instance
initialized with the values from the string.
"""
reactor = object()
client = endpoints.clientFromString(
reactor,
"tcp:example.com:1234:timeout=7:bindAddress=10.0.0.2")
self.assertIsInstance(client, endpoints.TCP4ClientEndpoint)
self.assertIdentical(client._reactor, reactor)
self.assertEqual(client._host, "example.com")
self.assertEqual(client._port, 1234)
self.assertEqual(client._timeout, 7)
self.assertEqual(client._bindAddress, "10.0.0.2")
def test_tcpHostPositionalArg(self):
"""
When passed a TCP strports description specifying host as a positional
argument, L{endpoints.clientFromString} returns a L{TCP4ClientEndpoint}
instance initialized with the values from the string.
"""
reactor = object()
client = endpoints.clientFromString(
reactor,
"tcp:example.com:port=1234:timeout=7:bindAddress=10.0.0.2")
self.assertEqual(client._host, "example.com")
self.assertEqual(client._port, 1234)
def test_tcpPortPositionalArg(self):
"""
When passed a TCP strports description specifying port as a positional
argument, L{endpoints.clientFromString} returns a L{TCP4ClientEndpoint}
instance initialized with the values from the string.
"""
reactor = object()
client = endpoints.clientFromString(
reactor,
"tcp:host=example.com:1234:timeout=7:bindAddress=10.0.0.2")
self.assertEqual(client._host, "example.com")
self.assertEqual(client._port, 1234)
def test_tcpDefaults(self):
"""
A TCP strports description may omit I{timeout} or I{bindAddress} to
allow the default to be used.
"""
reactor = object()
client = endpoints.clientFromString(
reactor,
"tcp:host=example.com:port=1234")
self.assertEqual(client._timeout, 30)
self.assertEqual(client._bindAddress, None)
def test_unix(self):
"""
When passed a UNIX strports description, L{endpoints.clientFromString}
returns a L{UNIXClientEndpoint} instance initialized with the values
from the string.
"""
reactor = object()
client = endpoints.clientFromString(
reactor,
"unix:path=/var/foo/bar:lockfile=1:timeout=9")
self.assertIsInstance(client, endpoints.UNIXClientEndpoint)
self.assertIdentical(client._reactor, reactor)
self.assertEqual(client._path, "/var/foo/bar")
self.assertEqual(client._timeout, 9)
self.assertEqual(client._checkPID, True)
def test_unixDefaults(self):
"""
A UNIX strports description may omit I{lockfile} or I{timeout} to allow
the defaults to be used.
"""
client = endpoints.clientFromString(object(), "unix:path=/var/foo/bar")
self.assertEqual(client._timeout, 30)
self.assertEqual(client._checkPID, False)
def test_unixPathPositionalArg(self):
"""
When passed a UNIX strports description specifying path as a positional
argument, L{endpoints.clientFromString} returns a L{UNIXClientEndpoint}
instance initialized with the values from the string.
"""
reactor = object()
client = endpoints.clientFromString(
reactor,
"unix:/var/foo/bar:lockfile=1:timeout=9")
self.assertIsInstance(client, endpoints.UNIXClientEndpoint)
self.assertIdentical(client._reactor, reactor)
self.assertEqual(client._path, "/var/foo/bar")
self.assertEqual(client._timeout, 9)
self.assertEqual(client._checkPID, True)
def test_typeFromPlugin(self):
"""
L{endpoints.clientFromString} looks up plugins of type
L{IStreamClientEndpoint} and constructs endpoints from them.
"""
addFakePlugin(self)
notAReactor = object()
clientEndpoint = endpoints.clientFromString(
notAReactor, "cfake:alpha:beta:cee=dee:num=1")
from twisted.plugins.fakeendpoint import fakeClient
self.assertIdentical(clientEndpoint.parser, fakeClient)
self.assertEqual(clientEndpoint.args, ('alpha', 'beta'))
self.assertEqual(clientEndpoint.kwargs, dict(cee='dee', num='1'))
def test_unknownType(self):
"""
L{endpoints.serverFromString} raises C{ValueError} when given an
unknown endpoint type.
"""
value = self.assertRaises(
# faster-than-light communication not supported
ValueError, endpoints.clientFromString, None,
"ftl:andromeda/carcosa/hali/2387")
self.assertEqual(
str(value),
"Unknown endpoint type: 'ftl'")
class SSLClientStringTests(unittest.TestCase):
"""
Tests for L{twisted.internet.endpoints.clientFromString} which require SSL.
"""
if skipSSL:
skip = skipSSL
def test_ssl(self):
"""
When passed an SSL strports description, L{clientFromString} returns a
L{SSL4ClientEndpoint} instance initialized with the values from the
string.
"""
reactor = object()
client = endpoints.clientFromString(
reactor,
"ssl:host=example.net:port=4321:privateKey=%s:"
"certKey=%s:bindAddress=10.0.0.3:timeout=3:caCertsDir=%s" %
(escapedPEMPathName,
escapedPEMPathName,
escapedCAsPathName))
self.assertIsInstance(client, endpoints.SSL4ClientEndpoint)
self.assertIdentical(client._reactor, reactor)
self.assertEqual(client._host, "example.net")
self.assertEqual(client._port, 4321)
self.assertEqual(client._timeout, 3)
self.assertEqual(client._bindAddress, "10.0.0.3")
certOptions = client._sslContextFactory
self.assertIsInstance(certOptions, CertificateOptions)
ctx = certOptions.getContext()
self.assertIsInstance(ctx, ContextType)
self.assertEqual(Certificate(certOptions.certificate),
testCertificate)
privateCert = PrivateCertificate(certOptions.certificate)
privateCert._setPrivateKey(KeyPair(certOptions.privateKey))
self.assertEqual(privateCert, testPrivateCertificate)
expectedCerts = [
Certificate.loadPEM(x.getContent()) for x in
[casPath.child("thing1.pem"), casPath.child("thing2.pem")]
if x.basename().lower().endswith('.pem')
]
self.assertEqual(sorted((Certificate(x) for x in certOptions.caCerts),
key=lambda cert: cert.digest()),
sorted(expectedCerts,
key=lambda cert: cert.digest()))
def test_sslPositionalArgs(self):
"""
When passed an SSL strports description, L{clientFromString} returns a
L{SSL4ClientEndpoint} instance initialized with the values from the
string.
"""
reactor = object()
client = endpoints.clientFromString(
reactor,
"ssl:example.net:4321:privateKey=%s:"
"certKey=%s:bindAddress=10.0.0.3:timeout=3:caCertsDir=%s" %
(escapedPEMPathName,
escapedPEMPathName,
escapedCAsPathName))
self.assertIsInstance(client, endpoints.SSL4ClientEndpoint)
self.assertIdentical(client._reactor, reactor)
self.assertEqual(client._host, "example.net")
self.assertEqual(client._port, 4321)
self.assertEqual(client._timeout, 3)
self.assertEqual(client._bindAddress, "10.0.0.3")
def test_unreadableCertificate(self):
"""
If a certificate in the directory is unreadable,
L{endpoints._loadCAsFromDir} will ignore that certificate.
"""
class UnreadableFilePath(FilePath):
def getContent(self):
data = FilePath.getContent(self)
# There is a duplicate of thing2.pem, so ignore anything that
# looks like it.
if data == casPath.child("thing2.pem").getContent():
raise IOError(EPERM)
else:
return data
casPathClone = casPath.child("ignored").parent()
casPathClone.clonePath = UnreadableFilePath
self.assertEqual(
[Certificate(x) for x in endpoints._loadCAsFromDir(casPathClone)],
[Certificate.loadPEM(casPath.child("thing1.pem").getContent())])
def test_sslSimple(self):
"""
When passed an SSL strports description without any extra parameters,
L{clientFromString} returns a simple non-verifying endpoint that will
speak SSL.
"""
reactor = object()
client = endpoints.clientFromString(
reactor, "ssl:host=simple.example.org:port=4321")
certOptions = client._sslContextFactory
self.assertIsInstance(certOptions, CertificateOptions)
self.assertEqual(certOptions.verify, False)
ctx = certOptions.getContext()
self.assertIsInstance(ctx, ContextType)
class AdoptedStreamServerEndpointTestCase(ServerEndpointTestCaseMixin,
unittest.TestCase):
"""
Tests for adopted socket-based stream server endpoints.
"""
def _createStubbedAdoptedEndpoint(self, reactor, fileno, addressFamily):
"""
Create an L{AdoptedStreamServerEndpoint} which may safely be used with
an invalid file descriptor. This is convenient for a number of unit
tests.
"""
e = endpoints.AdoptedStreamServerEndpoint(reactor, fileno, addressFamily)
# Stub out some syscalls which would fail, given our invalid file
# descriptor.
e._close = lambda fd: None
e._setNonBlocking = lambda fd: None
return e
def createServerEndpoint(self, reactor, factory):
"""
Create a new L{AdoptedStreamServerEndpoint} for use by a test.
@return: A three-tuple:
- The endpoint
- A tuple of the arguments expected to be passed to the underlying
reactor method
- An IAddress object which will match the result of
L{IListeningPort.getHost} on the port returned by the endpoint.
"""
fileno = 12
addressFamily = AF_INET
endpoint = self._createStubbedAdoptedEndpoint(
reactor, fileno, addressFamily)
# Magic numbers come from the implementation of MemoryReactor
address = IPv4Address("TCP", "0.0.0.0", 1234)
return (endpoint, (fileno, addressFamily, factory), address)
def expectedServers(self, reactor):
"""
@return: The ports which were actually adopted by C{reactor} via calls
to its L{IReactorSocket.adoptStreamPort} implementation.
"""
return reactor.adoptedPorts
def listenArgs(self):
"""
@return: A C{dict} of additional keyword arguments to pass to the
C{createServerEndpoint}.
"""
return {}
def test_singleUse(self):
"""
L{AdoptedStreamServerEndpoint.listen} can only be used once. The file
descriptor given is closed after the first use, and subsequent calls to
C{listen} return a L{Deferred} that fails with L{AlreadyListened}.
"""
reactor = MemoryReactor()
endpoint = self._createStubbedAdoptedEndpoint(reactor, 13, AF_INET)
endpoint.listen(object())
d = self.assertFailure(endpoint.listen(object()), error.AlreadyListened)
def listenFailed(ignored):
self.assertEqual(1, len(reactor.adoptedPorts))
d.addCallback(listenFailed)
return d
def test_descriptionNonBlocking(self):
"""
L{AdoptedStreamServerEndpoint.listen} sets the file descriptor given to
it to non-blocking.
"""
reactor = MemoryReactor()
endpoint = self._createStubbedAdoptedEndpoint(reactor, 13, AF_INET)
events = []
def setNonBlocking(fileno):
events.append(("setNonBlocking", fileno))
endpoint._setNonBlocking = setNonBlocking
d = endpoint.listen(object())
def listened(ignored):
self.assertEqual([("setNonBlocking", 13)], events)
d.addCallback(listened)
return d
def test_descriptorClosed(self):
"""
L{AdoptedStreamServerEndpoint.listen} closes its file descriptor after
adding it to the reactor with L{IReactorSocket.adoptStreamPort}.
"""
reactor = MemoryReactor()
endpoint = self._createStubbedAdoptedEndpoint(reactor, 13, AF_INET)
events = []
def close(fileno):
events.append(("close", fileno, len(reactor.adoptedPorts)))
endpoint._close = close
d = endpoint.listen(object())
def listened(ignored):
self.assertEqual([("close", 13, 1)], events)
d.addCallback(listened)
return d
class SystemdEndpointPluginTests(unittest.TestCase):
"""
Unit tests for the systemd stream server endpoint and endpoint string
description parser.
@see: U{systemd<http://www.freedesktop.org/wiki/Software/systemd>}
"""
_parserClass = endpoints._SystemdParser
def test_pluginDiscovery(self):
"""
L{endpoints._SystemdParser} is found as a plugin for
L{interfaces.IStreamServerEndpointStringParser} interface.
"""
parsers = list(getPlugins(
interfaces.IStreamServerEndpointStringParser))
for p in parsers:
if isinstance(p, self._parserClass):
break
else:
self.fail("Did not find systemd parser in %r" % (parsers,))
def test_interface(self):
"""
L{endpoints._SystemdParser} instances provide
L{interfaces.IStreamServerEndpointStringParser}.
"""
parser = self._parserClass()
self.assertTrue(verifyObject(
interfaces.IStreamServerEndpointStringParser, parser))
def _parseStreamServerTest(self, addressFamily, addressFamilyString):
"""
Helper for unit tests for L{endpoints._SystemdParser.parseStreamServer}
for different address families.
Handling of the given address family will be verified. If there is a
problem, a test-failing exception will be raised.
@param addressFamily: An address family constant, like L{socket.AF_INET}.
@param addressFamilyString: A string which should be recognized by the
parser as representing C{addressFamily}.
"""
reactor = object()
descriptors = [5, 6, 7, 8, 9]
index = 3
parser = self._parserClass()
parser._sddaemon = ListenFDs(descriptors)
server = parser.parseStreamServer(
reactor, domain=addressFamilyString, index=str(index))
self.assertIdentical(server.reactor, reactor)
self.assertEqual(server.addressFamily, addressFamily)
self.assertEqual(server.fileno, descriptors[index])
def test_parseStreamServerINET(self):
"""
IPv4 can be specified using the string C{"INET"}.
"""
self._parseStreamServerTest(AF_INET, "INET")
def test_parseStreamServerINET6(self):
"""
IPv6 can be specified using the string C{"INET6"}.
"""
self._parseStreamServerTest(AF_INET6, "INET6")
def test_parseStreamServerUNIX(self):
"""
A UNIX domain socket can be specified using the string C{"UNIX"}.
"""
try:
from socket import AF_UNIX
except ImportError:
raise unittest.SkipTest("Platform lacks AF_UNIX support")
else:
self._parseStreamServerTest(AF_UNIX, "UNIX")
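# Illustrative sketch (not run by these tests): the systemd parser above backs
# server descriptions such as "systemd:domain=INET:index=0", which adopt an
# already-listening socket inherited from systemd instead of binding a new one.
# This only works when the process is started via systemd socket activation;
# the reactor and factory arguments here are hypothetical.
def _exampleSystemdEndpoint(reactor, factory):
    endpoint = endpoints.serverFromString(reactor, "systemd:domain=INET:index=0")
    return endpoint.listen(factory)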
class TCP6ServerEndpointPluginTests(unittest.TestCase):
"""
Unit tests for the TCP IPv6 stream server endpoint string description parser.
"""
_parserClass = endpoints._TCP6ServerParser
def test_pluginDiscovery(self):
"""
L{endpoints._TCP6ServerParser} is found as a plugin for
L{interfaces.IStreamServerEndpointStringParser} interface.
"""
parsers = list(getPlugins(
interfaces.IStreamServerEndpointStringParser))
for p in parsers:
if isinstance(p, self._parserClass):
break
else:
self.fail("Did not find TCP6ServerEndpoint parser in %r" % (parsers,))
def test_interface(self):
"""
L{endpoints._TCP6ServerParser} instances provide
L{interfaces.IStreamServerEndpointStringParser}.
"""
parser = self._parserClass()
self.assertTrue(verifyObject(
interfaces.IStreamServerEndpointStringParser, parser))
def test_stringDescription(self):
"""
L{serverFromString} returns a L{TCP6ServerEndpoint} instance with a 'tcp6'
endpoint string description.
"""
ep = endpoints.serverFromString(MemoryReactor(),
"tcp6:8080:backlog=12:interface=\:\:1")
self.assertIsInstance(ep, endpoints.TCP6ServerEndpoint)
self.assertIsInstance(ep._reactor, MemoryReactor)
self.assertEqual(ep._port, 8080)
self.assertEqual(ep._backlog, 12)
self.assertEqual(ep._interface, '::1')
class StandardIOEndpointPluginTests(unittest.TestCase):
"""
Unit tests for the Standard I/O endpoint string description parser.
"""
_parserClass = endpoints._StandardIOParser
def test_pluginDiscovery(self):
"""
L{endpoints._StandardIOParser} is found as a plugin for
L{interfaces.IStreamServerEndpointStringParser} interface.
"""
parsers = list(getPlugins(
interfaces.IStreamServerEndpointStringParser))
for p in parsers:
if isinstance(p, self._parserClass):
break
else:
self.fail("Did not find StandardIOEndpoint parser in %r" % (parsers,))
def test_interface(self):
"""
L{endpoints._StandardIOParser} instances provide
L{interfaces.IStreamServerEndpointStringParser}.
"""
parser = self._parserClass()
self.assertTrue(verifyObject(
interfaces.IStreamServerEndpointStringParser, parser))
def test_stringDescription(self):
"""
L{serverFromString} returns a L{StandardIOEndpoint} instance with a 'stdio'
endpoint string description.
"""
ep = endpoints.serverFromString(MemoryReactor(), "stdio:")
self.assertIsInstance(ep, endpoints.StandardIOEndpoint)
self.assertIsInstance(ep._reactor, MemoryReactor)
|
|
# Copyright (c) 2015-2021 Patricio Cubillos and contributors.
# mc3 is open-source software under the MIT license (see LICENSE).
import os
import sys
import subprocess
import pytest
import numpy as np
import mc3
def quad(p, x):
"""
Quadratic polynomial function.
Parameters
----------
p: Polynomial constant, linear, and quadratic coefficients.
x: Array of independent-variable values at which to evaluate the polynomial.
Returns
-------
y: Polynomial evaluated at x: y(x) = p0 + p1*x + p2*x^2
"""
y = p[0] + p[1]*x + p[2]*x**2.0
return y
np.random.seed(12)
# Create a synthetic dataset:
x = np.linspace(0, 10, 100)
p0 = [4.5, -2.4, 0.5]
y = quad(p0, x)
uncert = np.sqrt(np.abs(y))
error = np.random.normal(0, uncert)
data = y + error
p1 = [4.5, 4.5, 0.5]
y1 = quad(p1, x)
uncert1 = np.sqrt(np.abs(y1))
data1 = y1 + np.random.normal(0, uncert1)
# Fit the quad polynomial coefficients:
params = np.array([10.0, -2.0, 0.1]) # Initial guess of fitting params.
pstep = np.array([0.03, 0.03, 0.05])
pnames = ["constant", "linear", "quadratic"]
texnames = ["$\\alpha$", "$\\log(\\beta)$", "quadratic"]
sampler = 'snooker'
def test_mcmc_minimal():
output = mc3.sample(data, uncert, func=quad, params=np.copy(params),
indparams=[x],
pstep=pstep, sampler=sampler, nsamples=1e4, burnin=100)
# No error? that's a pass.
assert output is not None
def test_mcmc_demc():
output = mc3.sample(data, uncert, func=quad, params=np.copy(params),
indparams=[x],
pstep=pstep, nsamples=1e4, burnin=100,
sampler='demc')
assert output is not None
def test_mcmc_func_as_strings(tmp_path):
p = tmp_path / "quadratic.py"
CONTENT = u'def quad(p, x):\n y = p[0] + p[1]*x + p[2]*x**2.0\n return y'
p.write_text(CONTENT)
output = mc3.sample(func=('quad', 'quadratic', str(tmp_path)),
params=np.copy(params),
data=data, uncert=uncert, indparams=[x], pstep=pstep,
sampler=sampler, nsamples=1e4, burnin=100)
assert output is not None
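# Note on the two tests below: mc3 interprets pstep entry-wise.  A pstep of 0
# keeps a parameter fixed at its input value (test_mcmc_fixed), while a
# negative pstep ties a parameter to another one; here pstep=[0.03, -1, 0.05]
# ties the linear coefficient to the first parameter, which is why the test
# asserts output['bestp'][1] == output['bestp'][0].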
def test_mcmc_shared():
output = mc3.sample(data1, uncert1, func=quad, params=np.copy(params),
sampler=sampler, indparams=[x],
pstep=[0.03, -1, 0.05],
nsamples=1e4, burnin=100)
assert output is not None
assert output['bestp'][1] == output['bestp'][0]
def test_mcmc_fixed():
pars = np.copy(params)
pars[0] = p0[0]
output = mc3.sample(data, uncert, func=quad, params=np.copy(pars),
sampler=sampler, indparams=[x],
pstep=[0, 0.03, 0.05], nsamples=1e4, burnin=100)
assert output is not None
assert len(output['bestp']) == len(params)
assert output['bestp'][0] == pars[0]
assert output['CRlo'][0] == 0
assert output['CRhi'][0] == 0
assert output['stdp'][0] == 0
def test_mcmc_bounds():
output = mc3.sample(data, uncert, func=quad, params=[4.5, -2.5, 0.5],
sampler=sampler, indparams=[x],
pstep=pstep,
pmin=[4.0, -3.0, 0.4], pmax=[5.0, -2.0, 0.6],
nsamples=1e4, burnin=100)
def test_mcmc_pnames(capsys):
output = mc3.sample(data, uncert, func=quad, params=np.copy(params),
sampler=sampler, indparams=[x],
pstep=pstep, nsamples=1e4, burnin=100,
pnames=pnames)
captured = capsys.readouterr()
assert output is not None
assert "constant" in captured.out
assert "linear" in captured.out
assert "quadratic" in captured.out
def test_mcmc_texnames(capsys):
output = mc3.sample(data, uncert, func=quad, params=np.copy(params),
sampler=sampler, indparams=[x],
pstep=pstep, nsamples=1e4, burnin=100,
texnames=texnames)
captured = capsys.readouterr()
assert output is not None
assert "$\\alpha$" in captured.out
assert "$\\log(\\beta" in captured.out
assert "quadratic" in captured.out
def test_mcmc_pnames_texnames(capsys):
output = mc3.sample(data, uncert, func=quad, params=np.copy(params),
sampler=sampler, indparams=[x],
pstep=pstep, nsamples=1e4, burnin=100,
pnames=pnames, texnames=texnames)
captured = capsys.readouterr()
assert output is not None
assert "constant" in captured.out
assert "linear" in captured.out
assert "quadratic" in captured.out
@pytest.mark.parametrize('leastsq', ['lm', 'trf'])
def test_mcmc_optimize(capsys, leastsq):
output = mc3.sample(data, uncert, func=quad, params=np.copy(params),
sampler=sampler, indparams=[x],
pstep=pstep, nsamples=1e4, burnin=100,
leastsq=leastsq)
captured = capsys.readouterr()
assert output is not None
assert "Least-squares best-fitting parameters:" in captured.out
np.testing.assert_allclose(output['bestp'],
np.array([4.28263253, -2.40781859, 0.49534411]), rtol=1e-7)
def test_mcmc_optimize_chisqscale(capsys):
unc = np.copy(uncert)
output = mc3.sample(data, uncert, func=quad, params=np.copy(params),
sampler=sampler, indparams=[x],
pstep=pstep, nsamples=1e4, burnin=100,
leastsq='lm', chisqscale=True)
captured = capsys.readouterr()
assert output is not None
assert "Least-squares best-fitting parameters (rescaled chisq):" \
in captured.out
assert "Reduced chi-squared: 1.0000" in captured.out
# Assert that uncert has not mutated:
np.testing.assert_equal(uncert, unc)
def test_mcmc_gr(capsys):
output = mc3.sample(data, uncert, func=quad, params=np.copy(params),
sampler=sampler, indparams=[x],
pstep=pstep, nsamples=1e4, burnin=100,
grtest=True)
captured = capsys.readouterr()
assert output is not None
assert "Gelman-Rubin statistics for free parameters:" in captured.out
def test_mcmc_gr_break_frac(capsys):
output = mc3.sample(data, uncert, func=quad, params=np.copy(params),
sampler=sampler, indparams=[x],
pstep=pstep, nsamples=1e4, burnin=100,
grtest=True, grbreak=1.1, grnmin=0.51)
captured = capsys.readouterr()
assert output is not None
assert "All parameters satisfy the GR convergence threshold of 1.1" \
in captured.out
def test_mcmc_gr_break_iterations(capsys):
output = mc3.sample(data, uncert, func=quad, params=np.copy(params),
sampler=sampler, indparams=[x],
pstep=pstep, nsamples=1e4, burnin=100,
grtest=True, grbreak=1.1, grnmin=5000.0)
captured = capsys.readouterr()
assert output is not None
assert "All parameters satisfy the GR convergence threshold of 1.1" \
in captured.out
def test_mcmc_priors_gauss():
prior = np.array([ 4.5, 0.0, 0.0])
priorlow = np.array([ 0.1, 0.0, 0.0])
priorup = np.array([ 0.1, 0.0, 0.0])
output = mc3.sample(data, uncert, func=quad, params=np.copy(params),
sampler=sampler, indparams=[x],
pstep=pstep, nsamples=1e4, burnin=100,
prior=prior, priorlow=priorlow, priorup=priorup)
assert output is not None
assert -2*output['best_log_post'] > output['best_chisq']
assert np.all(-2*output['log_post'] > output['chisq'])
def test_mcmc_log(capsys, tmp_path):
os.chdir(str(tmp_path))
output = mc3.sample(data, uncert, func=quad, params=np.copy(params),
sampler=sampler, indparams=[x],
pstep=pstep, nsamples=1e4, burnin=100,
log='MCMC.log')
captured = capsys.readouterr()
assert output is not None
assert "'MCMC.log'" in captured.out
assert "MCMC.log" in os.listdir(".")
def test_mcmc_savefile(capsys, tmp_path):
os.chdir(str(tmp_path))
output = mc3.sample(data, uncert, func=quad, params=np.copy(params),
sampler=sampler, indparams=[x],
pstep=pstep, nsamples=1e4, burnin=100,
savefile='MCMC.npz')
captured = capsys.readouterr()
assert output is not None
assert "'MCMC.npz'" in captured.out
assert "MCMC.npz" in os.listdir(".")
def test_mcmc_plots(capsys, tmp_path):
os.chdir(str(tmp_path))
output = mc3.sample(data, uncert, func=quad, params=np.copy(params),
sampler=sampler, indparams=[x],
pstep=pstep, nsamples=1e4, burnin=100,
plots=True)
captured = capsys.readouterr()
assert output is not None
assert "snooker_trace.png" in captured.out
assert "snooker_pairwise.png" in captured.out
assert "snooker_posterior.png" in captured.out
assert "snooker_model.png" in captured.out
assert "snooker_trace.png" in os.listdir(".")
assert "snooker_pairwise.png" in os.listdir(".")
assert "snooker_posterior.png" in os.listdir(".")
assert "snooker_model.png" in os.listdir(".")
# Now, trigger the errors:
def test_mcmc_data_error(capsys):
output = mc3.sample(uncert=uncert, func=quad, params=np.copy(params),
sampler=sampler, indparams=[x],
pstep=pstep, nsamples=1e4, burnin=100)
captured = capsys.readouterr()
assert output is None
assert "'data' is a required argument." in captured.out
def test_mcmc_uncert_error(capsys):
output = mc3.sample(data=data, func=quad, params=np.copy(params),
sampler=sampler, indparams=[x],
pstep=pstep, nsamples=1e4, burnin=100)
captured = capsys.readouterr()
assert output is None
assert "'uncert' is a required argument." in captured.out
def test_mcmc_func_error(capsys):
output = mc3.sample(data=data, uncert=uncert, params=np.copy(params),
sampler=sampler, indparams=[x],
pstep=pstep, nsamples=1e4, burnin=100)
captured = capsys.readouterr()
assert output is None
assert "'func' must be either a callable or an iterable" in captured.out
def test_mcmc_params_error(capsys):
output = mc3.sample(data=data, uncert=uncert, func=quad, sampler=sampler,
indparams=[x], pstep=pstep, nsamples=1e4, burnin=100)
captured = capsys.readouterr()
assert output is None
assert "'params' is a required argument" in captured.out
def test_mcmc_sampler_error(capsys):
output = mc3.sample(data, uncert, func=quad, params=np.copy(params),
indparams=[x], pstep=pstep,
nsamples=1e4, burnin=100)
captured = capsys.readouterr()
assert output is None
assert "'sampler' is a required argument." in captured.out
def test_mcmc_nsamples_error(capsys):
output = mc3.sample(data, uncert, func=quad, params=np.copy(params),
sampler=sampler, indparams=[x],
pstep=pstep, burnin=100)
captured = capsys.readouterr()
assert output is None
assert "'nsamples' is a required argument for MCMC runs." in captured.out
def test_mcmc_samples_error(capsys):
output = mc3.sample(data, uncert, func=quad, params=np.copy(params),
sampler=sampler, indparams=[x], pstep=pstep,
nsamples=1e4, burnin=2000)
captured = capsys.readouterr()
assert output is None
assert "The number of burned-in samples (2000) is greater" in captured.out
def test_mcmc_leastsq_error(capsys):
output = mc3.sample(data, uncert, func=quad, params=np.copy(params),
sampler=sampler, indparams=[x], pstep=pstep,
leastsq='invalid', nsamples=1e4, burnin=100)
captured = capsys.readouterr()
assert output is None
assert "Invalid 'leastsq' input (invalid). Must select from " \
"['lm', 'trf']." in captured.out
@pytest.mark.skip
def test_mcmc_outputs():
# Check that outputs are there and have the right names.
pass
def test_entry_point_version(capfd):
subprocess.call('mc3 -v'.split())
if sys.version_info.major == 3:
captured = capfd.readouterr().out
else:
captured = capfd.readouterr().err
assert captured == f'mc3 version {mc3.__version__}.\n'
def test_mcmc_entry_point(tmp_path):
os.chdir(str(tmp_path))
p = tmp_path / 'MCMC.cfg'
p.write_text(u'''[MCMC]
data = data.npz
indparams = indp.npz
func = quad quadratic
params = 10.0 -2.0 0.1
pmin = -25.0 -10.0 -10.0
pmax = 30.0 10.0 10.0
pstep = 1.0 0.5 0.1
nsamples = 1e4
nchains = 7
sampler = snooker
grtest = True
burnin = 100
plots = True
savefile = MCMC_test.npz''')
p = tmp_path / 'quadratic.py'
p.write_text(u'''
def quad(p, x):
y = p[0] + p[1]*x + p[2]*x**2.0
return y''')
# Create synthetic dataset:
x = np.linspace(0, 10, 1000) # Independent model variable
p0 = [3, -2.4, 0.5] # True-underlying model parameters
y = quad(p0, x) # Noiseless model
uncert = np.sqrt(np.abs(y)) # Data points uncertainty
error = np.random.normal(0, uncert) # Noise for the data
data = y + error # Noisy data set
# Store data set and other inputs:
mc3.utils.savebin([data, uncert], 'data.npz')
mc3.utils.savebin([x], 'indp.npz')
subprocess.call('mc3 -c MCMC.cfg'.split())
assert "MCMC_test.npz" in os.listdir(".")
assert "MCMC_test_trace.png" in os.listdir(".")
assert "MCMC_test_pairwise.png" in os.listdir(".")
assert "MCMC_test_posterior.png" in os.listdir(".")
assert "MCMC_test_model.png" in os.listdir(".")
|
|
# -*- coding: utf-8 -*-
"""----------------------------------------------------------------------------
Author:
Huang Quanyong (wo1fSea)
quanyongh@foxmail.com
Date:
2016/10/19
Description:
PackerInterface.py
----------------------------------------------------------------------------"""
import os
from .. import Utils
from .AtlasInterface import AtlasInterface
SIZE_SEQUENCE = [2 ** ind for ind in range(32)]
def multi_pack_handler(args):
packer, args = args
if isinstance(args, (list, tuple)):
packer.pack(*args)
elif isinstance(args, dict):
packer.pack(**args)
class PackerInterface(object):
"""
interface of packer
"""
ATLAS_TYPE = AtlasInterface
def __init__(self, bg_color=0x00000000, texture_format=".png", max_width=4096, max_height=4096, enable_rotated=True,
force_square=False, border_padding=2, shape_padding=2, inner_padding=0, trim_mode=0,
reduce_border_artifacts=False, extrude=0, atlas_format=Utils.ATLAS_FORMAT_PLIST):
"""
init a packer
:param bg_color: background color of output image.
:param texture_format: texture format of the output file
:param max_width: the maximum width
:param max_height: the maximum height
:param enable_rotated: allow the rotating of sprites if there is a better fit in the texture
:param force_square: forces the texture to have a squared size
:param border_padding: space between the sprites and the border of the sprite sheet
:param shape_padding: space between sprites
:param inner_padding: adds transparent pixels to the inside of the sprite, growing it
:param trim_mode: pixels with an alpha value below this value will be trimmed; when 0, trimming is disabled
:param reduce_border_artifacts: adds color to transparent pixels by repeating a sprite's outer color values
:param extrude: repeats the sprite's pixels at the border; the sprite's size is not changed
:param atlas_format: texture config output type: 'plist' or 'json'. default: 'plist'
"""
self.bg_color = bg_color
self.texture_format = texture_format
self.max_width = max_width
self.max_height = max_height
self.enable_rotated = enable_rotated
self.force_square = force_square
self.border_padding = border_padding
self.shape_padding = shape_padding
self.inner_padding = inner_padding
self.extrude = extrude
self.trim_mode = trim_mode
self.reduce_border_artifacts = reduce_border_artifacts
self.atlas_format = atlas_format
@staticmethod
def _calculate_area(image_rect_list, inner_padding):
area = 0
for image_rect in image_rect_list:
area += image_rect.area + \
image_rect.width * inner_padding + \
image_rect.height * inner_padding + \
inner_padding ** 2
return area
@staticmethod
def _cal_init_size(area, min_width, min_height, max_width, max_height):
min_short = min(min_width, min_height)
min_long = max(min_width, min_height)
max_short = min(max_width, max_height)
max_long = max(max_width, max_height)
start_i = -1
start_j = -1
for i, l in enumerate(SIZE_SEQUENCE):
if l >= min_short and start_i == -1:
start_i = i
if l >= min_long and start_j == -1:
start_j = i
short = -1
long = -1
for j in range(start_j, len(SIZE_SEQUENCE)):
l = SIZE_SEQUENCE[j]
if (short != -1 and long != -1) or l > max_long:
break
for i in range(start_i, j + 1):
s = SIZE_SEQUENCE[i]
if (short != -1 and long != -1) or s > max_short:
break
if area <= l * s:
short, long = s, l
if short == -1 and long == -1:
return tuple((max_height, max_width))
if min_width == min_long:
return tuple((long, short))
else:
return tuple((short, long))
def _init_atlas_list(self, image_rect_list):
min_width, min_height = 0, 0
for image_rect in image_rect_list:
if min_width < image_rect.width:
min_width = image_rect.width
if min_height < image_rect.height:
min_height = image_rect.height
min_width += self.inner_padding
min_height += self.inner_padding
if self.enable_rotated:
if min(min_width, min_height) > min(self.max_width, self.max_height) or \
max(min_width, min_height) > max(self.max_width, self.max_height):
raise ValueError("size of image is larger than max size.")
else:
if min_height > self.max_height or min_width > self.max_width:
raise ValueError("size of image is larger than max size.")
atlas_list = []
area = self._calculate_area(image_rect_list, self.inner_padding)
w, h = self._cal_init_size(area, min_width, min_height, self.max_width, self.max_height)
atlas_list.append(self.ATLAS_TYPE(w, h, self.max_width, self.max_height,
force_square=self.force_square, border_padding=self.border_padding,
shape_padding=self.shape_padding, inner_padding=self.inner_padding))
area = area - w * h
while area > 0:
w, h = self._cal_init_size(area, 0, 0, self.max_width, self.max_height)
area = area - w * h
atlas_list.append(self.ATLAS_TYPE(w, h, self.max_width, self.max_height,
force_square=self.force_square, border_padding=self.border_padding,
shape_padding=self.shape_padding, inner_padding=self.inner_padding))
return atlas_list
def _pack(self, image_rect_list):
raise NotImplementedError
def pack(self, input_images, output_name, output_path="", input_base_path=None):
"""
pack the input images to sheets
:param input_images: a list of input image paths or an input directory path
:param output_name: the output file name
:param output_path: the output file path
:param input_base_path: the base path of input files
:return:
"""
if isinstance(input_images, (tuple, list)):
image_rects = Utils.load_images_from_paths(input_images)
else:
image_rects = Utils.load_images_from_dir(input_images)
if self.trim_mode:
for image_rect in image_rects:
image_rect.trim(self.trim_mode)
if self.extrude:
for image_rect in image_rects:
image_rect.extrude(self.extrude)
atlas_list = self._pack(image_rects)
assert "%d" in output_name or len(atlas_list) == 1, 'more than one output image, but no "%d" in output_name'
for i, atlas in enumerate(atlas_list):
texture_file_name = output_name if "%d" not in output_name else output_name % i
packed_plist = atlas.dump_plist("%s%s" % (texture_file_name, self.texture_format), input_base_path,
self.atlas_format)
packed_image = atlas.dump_image(self.bg_color)
if self.reduce_border_artifacts:
packed_image = Utils.alpha_bleeding(packed_image)
atlas_data_ext = Utils.get_atlas_data_ext(self.atlas_format)
Utils.save_atlas_data(packed_plist, os.path.join(output_path, "%s%s" % (texture_file_name, atlas_data_ext)),
self.atlas_format)
Utils.save_image(packed_image, os.path.join(output_path, "%s%s" % (texture_file_name, self.texture_format)))
def multi_pack(self, pack_args_list):
"""
pack with multiprocessing
:param pack_args_list: list of pack args
:return:
"""
import multiprocessing
pool_size = multiprocessing.cpu_count() * 2
pool = multiprocessing.Pool(processes=pool_size)
pool.map(multi_pack_handler, zip([self] * len(pack_args_list), pack_args_list))
pool.close()
pool.join()
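# Illustrative sketch (not used by the packer itself): how _cal_init_size
# chooses the starting atlas dimensions.  For a total sprite area of
# 300000 px^2 that must accommodate at least a 500x300 sprite within a
# 2048x2048 limit, it walks SIZE_SEQUENCE and returns the smallest
# power-of-two rectangle whose area covers the request, (1024, 512) here.
def _example_init_size():
    w, h = PackerInterface._cal_init_size(300000, 500, 300, 2048, 2048)
    return w, h  # (1024, 512)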
|
|
"""
Utility functions to load data from the UTLC challenge (Unsupervised Transfer
Learning).
The user should use the load_ndarray_dataset or load_sparse_dataset function
See the file ${PYLEARN2_DATA_PATH}/UTLC/README for details on the datasets.
"""
import cPickle
import gzip
import os
import numpy
import theano
import pylearn2.datasets.filetensor as ft
from pylearn2.utils.string_utils import preprocess
from pylearn2.utils.rng import make_np_rng
def load_ndarray_dataset(name, normalize=True, transfer=False,
normalize_on_the_fly=False, randomize_valid=False,
randomize_test=False):
"""
Load the train,valid,test data for the dataset `name` and return it in
ndarray format.
We suppose the data was created with ift6266h11/pretraitement/to_npy.py,
which shuffles the train set, so the train set should already be shuffled.
Parameters
----------
name : 'avicenna', 'harry', 'rita', 'sylvester' or 'ule'
Which dataset to load
normalize : bool
If True, we normalize the train dataset before returning it
transfer : bool
If True also return the transfer labels
normalize_on_the_fly : bool
If True, we return a Theano variable that outputs the normalized
value. If the user only takes a subtensor of that variable, Theano
optimization should ensure that only that subtensor is ever held in
memory in normalized form. We store the original data in shared
memory in its original dtype, which saves memory. This is especially
useful for using rita and harry with 1 GB per job.
randomize_valid : bool
Whether to randomize the order of the valid set. We always use the same
random order. If False, return in the same order as downloaded from the
web.
randomize_test : bool
Whether to randomize the order of the test set. We always use the same
random order. If False, return in the same order as downloaded from the
web.
Returns
-------
train, valid, test : ndarrays
Datasets returned if transfer = False
train, valid, test, transfer : ndarrays
Datasets returned if transfer = True
"""
assert not (normalize and normalize_on_the_fly), \
"Can't normalize in 2 way at the same time!"
assert name in ['avicenna', 'harry', 'rita', 'sylvester', 'ule']
common = os.path.join(
preprocess('${PYLEARN2_DATA_PATH}'), 'UTLC', 'filetensor', name + '_')
trname, vname, tename = [
common + subset + '.ft' for subset in ['train', 'valid', 'test']]
train = load_filetensor(trname)
valid = load_filetensor(vname)
test = load_filetensor(tename)
if randomize_valid:
rng = make_np_rng(None, [1, 2, 3, 4], which_method='permutation')
perm = rng.permutation(valid.shape[0])
valid = valid[perm]
if randomize_test:
rng = make_np_rng(None, [1, 2, 3, 4], which_method='permutation')
perm = rng.permutation(test.shape[0])
test = test[perm]
if normalize or normalize_on_the_fly:
if normalize_on_the_fly:
# Shared variables of the original type
train = theano.shared(train, borrow=True, name=name + "_train")
valid = theano.shared(valid, borrow=True, name=name + "_valid")
test = theano.shared(test, borrow=True, name=name + "_test")
# Symbolic variables cast into floatX
train = theano.tensor.cast(train, theano.config.floatX)
valid = theano.tensor.cast(valid, theano.config.floatX)
test = theano.tensor.cast(test, theano.config.floatX)
else:
train = numpy.asarray(train, theano.config.floatX)
valid = numpy.asarray(valid, theano.config.floatX)
test = numpy.asarray(test, theano.config.floatX)
if name == "ule":
train /= 255
valid /= 255
test /= 255
elif name in ["avicenna", "sylvester"]:
if name == "avicenna":
train_mean = 514.62154022835455
train_std = 6.829096494224145
else:
train_mean = 403.81889927027686
train_std = 96.43841050784053
train -= train_mean
valid -= train_mean
test -= train_mean
train /= train_std
valid /= train_std
test /= train_std
elif name == "harry":
std = 0.69336046033925791 # train.std() is slow to compute, so use the cached value
train /= std
valid /= std
test /= std
elif name == "rita":
v = numpy.asarray(230, dtype=theano.config.floatX)
train /= v
valid /= v
test /= v
else:
raise Exception(
"This dataset don't have its normalization defined")
if transfer:
transfer = load_ndarray_transfer(name)
return train, valid, test, transfer
else:
return train, valid, test
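# Illustrative sketch (not part of the loader API, assumes the UTLC data is
# installed under ${PYLEARN2_DATA_PATH}): with normalize_on_the_fly=True the
# returned values are symbolic, so slicing and compiling a small function only
# materializes the requested minibatch in normalized (floatX) form.
def _example_on_the_fly_minibatch(start=0, stop=128):
    train, valid, test = load_ndarray_dataset(
        'rita', normalize=False, normalize_on_the_fly=True)
    batch = train[start:stop]            # still a symbolic subtensor
    return theano.function([], batch)()  # normalization happens here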
def load_sparse_dataset(name, normalize=True, transfer=False,
randomize_valid=False,
randomize_test=False):
"""
Load the train,valid,test data for the dataset `name` and return it in
sparse format.
We suppose the data was created with ift6266h11/pretraitement/to_npy.py,
which shuffles the train set, so the train set should already be shuffled.
name : 'avicenna', 'harry', 'rita', 'sylvester' or 'ule'
Which dataset to load
normalize : bool
If True, we normalize the train dataset before returning it
transfer : bool
If True, also return the transfer labels
randomize_valid : bool
Whether to randomize the order of the valid set. We always use the same
random order. If False, return in the same order as downloaded from the
web.
randomize_test : bool
Whether to randomize the order of the test set. We always use the same
random order. If False, return in the same order as downloaded from the
web.
Returns
-------
train, valid, test : ndarrays
Datasets returned if transfer = False
train, valid, test, transfer : ndarrays
Datasets returned if transfer = True
"""
assert name in ['harry', 'terry', 'ule']
common = os.path.join(
preprocess('${PYLEARN2_DATA_PATH}'), 'UTLC', 'sparse', name + '_')
trname, vname, tename = [
common + subset + '.npy' for subset in ['train', 'valid', 'test']]
train = load_sparse(trname)
valid = load_sparse(vname)
test = load_sparse(tename)
# Data should already be in csr format that support
# this type of indexing.
if randomize_valid:
rng = make_np_rng(None, [1, 2, 3, 4], which_method='permutation')
perm = rng.permutation(valid.shape[0])
valid = valid[perm]
if randomize_test:
rng = make_np_rng(None, [1, 2, 3, 4], which_method='permutation')
perm = rng.permutation(test.shape[0])
test = test[perm]
if normalize:
if name == "ule":
train = train.astype(theano.config.floatX) / 255
valid = valid.astype(theano.config.floatX) / 255
test = test.astype(theano.config.floatX) / 255
elif name == "harry":
train = train.astype(theano.config.floatX)
valid = valid.astype(theano.config.floatX)
test = test.astype(theano.config.floatX)
std = 0.69336046033925791 # train.std() is slow to compute, so use the cached value
train = (train) / std
valid = (valid) / std
test = (test) / std
elif name == "terry":
train = train.astype(theano.config.floatX)
valid = valid.astype(theano.config.floatX)
test = test.astype(theano.config.floatX)
train = (train) / 300
valid = (valid) / 300
test = (test) / 300
else:
raise Exception(
"This dataset don't have its normalization defined")
if transfer:
fname = os.path.join(preprocess("${PYLEARN2_DATA_PATH}"),
"UTLC",
"filetensor",
name + "_transfer.ft")
transfer = load_filetensor(fname)
return train, valid, test, transfer
else:
return train, valid, test
def load_ndarray_transfer(name):
"""
Load the transfer labels for the training set of data set `name`.
Parameters
----------
name : 'avicenna', 'harry', 'rita', 'sylvester' or 'ule'
Which dataset to load
Returns
-------
transfer : ndarray
Transfer dataset loaded
"""
assert name in ['avicenna', 'harry', 'rita', 'sylvester', 'terry', 'ule']
fname = os.path.join(preprocess('${PYLEARN2_DATA_PATH}'),
'UTLC',
'filetensor', name + '_transfer.ft')
transfer = load_filetensor(fname)
return transfer
def load_ndarray_label(name):
"""
Load the train,valid,test label data for the dataset `name` and return it
in ndarray format. This is only available for the toy dataset ule.
Parameters
----------
name : 'ule'
Must be 'ule'
Returns
-------
trainl, validl, testl : ndarray
Label data loaded
"""
assert name in ['ule']
common_path = os.path.join(
preprocess('${PYLEARN2_DATA_PATH}'), 'UTLC', 'filetensor', name + '_')
trname, vname, tename = [common_path + subset + '.tf'
for subset in ['trainl', 'validl', 'testl']]
trainl = load_filetensor(trname)
validl = load_filetensor(vname)
testl = load_filetensor(tename)
return trainl, validl, testl
def load_filetensor(fname):
"""
.. todo::
WRITEME
"""
f = None
try:
if not os.path.exists(fname):
fname = fname + '.gz'
f = gzip.open(fname)
elif fname.endswith('.gz'):
f = gzip.open(fname)
else:
f = open(fname)
d = ft.read(f)
finally:
if f:
f.close()
return d
def load_sparse(fname):
"""
.. todo::
WRITEME
"""
f = None
try:
if not os.path.exists(fname):
fname = fname + '.gz'
f = gzip.open(fname)
elif fname.endswith('.gz'):
f = gzip.open(fname)
else:
f = open(fname)
d = cPickle.load(f)
finally:
if f:
f.close()
return d
|
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Initial ggrc_workflows migration
Revision ID: 1d33919af441
Revises: None
Create Date: 2014-05-29 01:00:47.198955
"""
# revision identifiers, used by Alembic.
revision = '1d33919af441'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('tasks',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('end_date', sa.Date(), nullable=True),
sa.Column('start_date', sa.Date(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('title', sa.String(length=250), nullable=False),
sa.Column('slug', sa.String(length=250), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('modified_by_id', sa.Integer(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('context_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['context_id'], ['contexts.id'], name='fk_tasks_context_id'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('slug', name='uq_tasks'),
sa.UniqueConstraint('title', name='uq_t_tasks')
)
op.create_index('fk_tasks_contexts', 'tasks', ['context_id'], unique=False)
op.create_table('task_entries',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('modified_by_id', sa.Integer(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('context_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['context_id'], ['contexts.id'], name='fk_task_entries_context_id'),
sa.PrimaryKeyConstraint('id')
)
op.create_index('fk_task_entries_contexts', 'task_entries', ['context_id'], unique=False)
op.create_table('workflows',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('end_date', sa.Date(), nullable=True),
sa.Column('start_date', sa.Date(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('title', sa.String(length=250), nullable=False),
sa.Column('slug', sa.String(length=250), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('modified_by_id', sa.Integer(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('context_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['context_id'], ['contexts.id'], name='fk_workflows_context_id'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('slug', name='uq_workflows'),
sa.UniqueConstraint('title', name='uq_t_workflows')
)
op.create_index('fk_workflows_contexts', 'workflows', ['context_id'], unique=False)
op.create_table('workflow_people',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('workflow_id', sa.Integer(), nullable=False),
sa.Column('person_id', sa.Integer(), nullable=False),
sa.Column('status', sa.String(length=250), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('modified_by_id', sa.Integer(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('context_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['context_id'], ['contexts.id'], name='fk_workflow_people_context_id'),
sa.ForeignKeyConstraint(['person_id'], ['people.id'], name='fk_workflow_people_person_id'),
sa.ForeignKeyConstraint(['workflow_id'], ['workflows.id'], name='fk_workflow_people_workflow_id'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('workflow_id', 'person_id')
)
op.create_index('fk_workflow_people_contexts', 'workflow_people', ['context_id'], unique=False)
op.create_index('ix_person_id', 'workflow_people', ['person_id'], unique=False)
op.create_index('ix_workflow_id', 'workflow_people', ['workflow_id'], unique=False)
op.create_table('workflow_tasks',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('workflow_id', sa.Integer(), nullable=False),
sa.Column('task_id', sa.Integer(), nullable=False),
sa.Column('end_date', sa.Date(), nullable=True),
sa.Column('start_date', sa.Date(), nullable=True),
sa.Column('status', sa.String(length=250), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('modified_by_id', sa.Integer(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('context_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['context_id'], ['contexts.id'], name='fk_workflow_tasks_context_id'),
sa.ForeignKeyConstraint(['task_id'], ['tasks.id'], name='fk_workflow_tasks_task_id'),
sa.ForeignKeyConstraint(['workflow_id'], ['workflows.id'], name='fk_workflow_tasks_workflow_id'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('workflow_id', 'task_id')
)
op.create_index('fk_workflow_tasks_contexts', 'workflow_tasks', ['context_id'], unique=False)
op.create_table('task_groups',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('workflow_id', sa.Integer(), nullable=False),
sa.Column('contact_id', sa.Integer(), nullable=True),
sa.Column('end_date', sa.Date(), nullable=True),
sa.Column('start_date', sa.Date(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('title', sa.String(length=250), nullable=False),
sa.Column('slug', sa.String(length=250), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('modified_by_id', sa.Integer(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('context_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['contact_id'], ['people.id'], name='fk_task_groups_contact_id'),
sa.ForeignKeyConstraint(['context_id'], ['contexts.id'], name='fk_task_groups_context_id'),
sa.ForeignKeyConstraint(['workflow_id'], ['workflows.id'], name='fk_task_groups_workflow_id'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('slug', name='uq_task_groups'),
sa.UniqueConstraint('title', name='uq_t_task_groups')
)
op.create_index('fk_task_groups_contact', 'task_groups', ['contact_id'], unique=False)
op.create_index('fk_task_groups_contexts', 'task_groups', ['context_id'], unique=False)
op.create_table('workflow_objects',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('workflow_id', sa.Integer(), nullable=False),
sa.Column('object_id', sa.Integer(), nullable=False),
sa.Column('object_type', sa.String(length=250), nullable=False),
sa.Column('end_date', sa.Date(), nullable=True),
sa.Column('start_date', sa.Date(), nullable=True),
sa.Column('status', sa.String(length=250), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('modified_by_id', sa.Integer(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('context_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['context_id'], ['contexts.id'], name='fk_workflow_objects_context_id'),
sa.ForeignKeyConstraint(['workflow_id'], ['workflows.id'], name='fk_workflow_objects_workflow_id'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('workflow_id', 'object_id', 'object_type')
)
op.create_index('fk_workflow_objects_contexts', 'workflow_objects', ['context_id'], unique=False)
op.create_index('ix_workflow_id', 'workflow_objects', ['workflow_id'], unique=False)
op.create_table('task_group_objects',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('task_group_id', sa.Integer(), nullable=False),
sa.Column('object_id', sa.Integer(), nullable=False),
sa.Column('object_type', sa.String(length=250), nullable=False),
sa.Column('end_date', sa.Date(), nullable=True),
sa.Column('start_date', sa.Date(), nullable=True),
sa.Column('status', sa.String(length=250), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('modified_by_id', sa.Integer(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('context_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['context_id'], ['contexts.id'], name='fk_task_group_objects_context_id'),
sa.ForeignKeyConstraint(['task_group_id'], ['task_groups.id'], name='fk_task_group_objects_task_group_id'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('task_group_id', 'object_id', 'object_type')
)
op.create_index('fk_task_group_objects_contexts', 'task_group_objects', ['context_id'], unique=False)
op.create_index('ix_task_group_id', 'task_group_objects', ['task_group_id'], unique=False)
op.create_table('task_group_tasks',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('task_group_id', sa.Integer(), nullable=False),
sa.Column('task_id', sa.Integer(), nullable=False),
sa.Column('end_date', sa.Date(), nullable=True),
sa.Column('start_date', sa.Date(), nullable=True),
sa.Column('status', sa.String(length=250), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('modified_by_id', sa.Integer(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('context_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['context_id'], ['contexts.id'], name='fk_task_group_tasks_context_id'),
sa.ForeignKeyConstraint(['task_group_id'], ['task_groups.id'], name='fk_task_group_tasks_task_group_id'),
sa.ForeignKeyConstraint(['task_id'], ['tasks.id'], name='fk_task_group_tasks_task_id'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('task_group_id', 'task_id')
)
op.create_index('fk_task_group_tasks_contexts', 'task_group_tasks', ['context_id'], unique=False)
def downgrade():
op.drop_constraint('fk_task_group_tasks_context_id', 'task_group_tasks', type_='foreignkey')
op.drop_index('fk_task_group_tasks_contexts', table_name='task_group_tasks')
op.drop_table('task_group_tasks')
op.drop_index('ix_task_group_id', table_name='task_group_objects')
op.drop_constraint('fk_task_group_objects_context_id', 'task_group_objects', type_='foreignkey')
op.drop_index('fk_task_group_objects_contexts', table_name='task_group_objects')
op.drop_table('task_group_objects')
op.drop_index('ix_workflow_id', table_name='workflow_objects')
op.drop_constraint('fk_workflow_objects_context_id', 'workflow_objects', type_='foreignkey')
op.drop_index('fk_workflow_objects_contexts', table_name='workflow_objects')
op.drop_table('workflow_objects')
op.drop_constraint('fk_task_groups_context_id', 'task_groups', type_='foreignkey')
op.drop_index('fk_task_groups_contexts', table_name='task_groups')
op.drop_constraint('fk_task_groups_contact_id', 'task_groups', type_='foreignkey')
op.drop_index('fk_task_groups_contact', table_name='task_groups')
op.drop_table('task_groups')
op.drop_constraint('fk_workflow_tasks_context_id', 'workflow_tasks', type_='foreignkey')
op.drop_index('fk_workflow_tasks_contexts', table_name='workflow_tasks')
op.drop_table('workflow_tasks')
op.drop_index('ix_workflow_id', table_name='workflow_people')
op.drop_constraint('fk_workflow_people_person_id', 'workflow_people', type_='foreignkey')
op.drop_index('ix_person_id', table_name='workflow_people')
op.drop_constraint('fk_workflow_people_context_id', 'workflow_people', type_='foreignkey')
op.drop_index('fk_workflow_people_contexts', table_name='workflow_people')
op.drop_table('workflow_people')
op.drop_constraint('fk_workflows_context_id', 'workflows', type_='foreignkey')
op.drop_index('fk_workflows_contexts', table_name='workflows')
op.drop_table('workflows')
op.drop_constraint('fk_task_entries_context_id', 'task_entries', type_='foreignkey')
op.drop_index('fk_task_entries_contexts', table_name='task_entries')
op.drop_table('task_entries')
op.drop_constraint('fk_tasks_context_id', 'tasks', type_='foreignkey')
op.drop_index('fk_tasks_contexts', table_name='tasks')
op.drop_table('tasks')
|
|
#
# Created by: Pearu Peterson, March 2002
#
""" Test functions for linalg.matfuncs module
"""
from __future__ import division, print_function, absolute_import
import random
import functools
import numpy as np
from numpy import array, matrix, identity, dot, sqrt, double
from numpy.testing import (
assert_array_equal, assert_array_less, assert_equal,
assert_array_almost_equal, assert_array_almost_equal_nulp,
assert_allclose, assert_)
import pytest
from scipy._lib._numpy_compat import _assert_warns, suppress_warnings
import scipy.linalg
from scipy.linalg import (funm, signm, logm, sqrtm, fractional_matrix_power,
expm, expm_frechet, expm_cond, norm)
from scipy.linalg import _matfuncs_inv_ssq
import scipy.linalg._expm_frechet
from scipy.optimize import minimize
def _get_al_mohy_higham_2012_experiment_1():
"""
Return the test matrix from Experiment (1) of [1]_.
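    Notes
    -----
    The matrix is upper triangular with off-diagonal entries on the order of
    3e4, so expm and logm round trips through it are badly conditioned; the
    tests below use it as a stress case.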
References
----------
.. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
"Improved Inverse Scaling and Squaring Algorithms
for the Matrix Logarithm."
SIAM Journal on Scientific Computing, 34 (4). C152-C169.
ISSN 1095-7197
"""
A = np.array([
[3.2346e-1, 3e4, 3e4, 3e4],
[0, 3.0089e-1, 3e4, 3e4],
[0, 0, 3.2210e-1, 3e4],
[0, 0, 0, 3.0744e-1]], dtype=float)
return A
class TestSignM(object):
def test_nils(self):
a = array([[29.2, -24.2, 69.5, 49.8, 7.],
[-9.2, 5.2, -18., -16.8, -2.],
[-10., 6., -20., -18., -2.],
[-9.6, 9.6, -25.5, -15.4, -2.],
[9.8, -4.8, 18., 18.2, 2.]])
cr = array([[11.94933333,-2.24533333,15.31733333,21.65333333,-2.24533333],
[-3.84266667,0.49866667,-4.59066667,-7.18666667,0.49866667],
[-4.08,0.56,-4.92,-7.6,0.56],
[-4.03466667,1.04266667,-5.59866667,-7.02666667,1.04266667],
[4.15733333,-0.50133333,4.90933333,7.81333333,-0.50133333]])
r = signm(a)
assert_array_almost_equal(r,cr)
def test_defective1(self):
a = array([[0.0,1,0,0],[1,0,1,0],[0,0,0,1],[0,0,1,0]])
r = signm(a, disp=False)
#XXX: what would be the correct result?
def test_defective2(self):
a = array((
[29.2,-24.2,69.5,49.8,7.0],
[-9.2,5.2,-18.0,-16.8,-2.0],
[-10.0,6.0,-20.0,-18.0,-2.0],
[-9.6,9.6,-25.5,-15.4,-2.0],
[9.8,-4.8,18.0,18.2,2.0]))
r = signm(a, disp=False)
#XXX: what would be the correct result?
def test_defective3(self):
a = array([[-2., 25., 0., 0., 0., 0., 0.],
[0., -3., 10., 3., 3., 3., 0.],
[0., 0., 2., 15., 3., 3., 0.],
[0., 0., 0., 0., 15., 3., 0.],
[0., 0., 0., 0., 3., 10., 0.],
[0., 0., 0., 0., 0., -2., 25.],
[0., 0., 0., 0., 0., 0., -3.]])
r = signm(a, disp=False)
#XXX: what would be the correct result?
class TestLogM(object):
def test_nils(self):
a = array([[-2., 25., 0., 0., 0., 0., 0.],
[0., -3., 10., 3., 3., 3., 0.],
[0., 0., 2., 15., 3., 3., 0.],
[0., 0., 0., 0., 15., 3., 0.],
[0., 0., 0., 0., 3., 10., 0.],
[0., 0., 0., 0., 0., -2., 25.],
[0., 0., 0., 0., 0., 0., -3.]])
m = (identity(7)*3.1+0j)-a
logm(m, disp=False)
#XXX: what would be the correct result?
def test_al_mohy_higham_2012_experiment_1_logm(self):
# The logm completes the round trip successfully.
# Note that the expm leg of the round trip is badly conditioned.
A = _get_al_mohy_higham_2012_experiment_1()
A_logm, info = logm(A, disp=False)
A_round_trip = expm(A_logm)
assert_allclose(A_round_trip, A, rtol=1e-5, atol=1e-14)
def test_al_mohy_higham_2012_experiment_1_funm_log(self):
# The raw funm with np.log does not complete the round trip.
# Note that the expm leg of the round trip is badly conditioned.
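        # (logm uses the specialized inverse scaling and squaring algorithm in
        # _matfuncs_inv_ssq, whereas funm evaluates np.log via a generic
        # Schur-Parlett style recurrence, which is less robust for this highly
        # non-normal matrix.)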
A = _get_al_mohy_higham_2012_experiment_1()
A_funm_log, info = funm(A, np.log, disp=False)
A_round_trip = expm(A_funm_log)
assert_(not np.allclose(A_round_trip, A, rtol=1e-5, atol=1e-14))
def test_round_trip_random_float(self):
np.random.seed(1234)
for n in range(1, 6):
M_unscaled = np.random.randn(n, n)
for scale in np.logspace(-4, 4, 9):
M = M_unscaled * scale
# Eigenvalues are related to the branch cut.
W = np.linalg.eigvals(M)
                err_msg = 'M:{0} eigvals:{1}'.format(M, W)
# Check sqrtm round trip because it is used within logm.
M_sqrtm, info = sqrtm(M, disp=False)
M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm)
assert_allclose(M_sqrtm_round_trip, M)
# Check logm round trip.
M_logm, info = logm(M, disp=False)
M_logm_round_trip = expm(M_logm)
assert_allclose(M_logm_round_trip, M, err_msg=err_msg)
def test_round_trip_random_complex(self):
np.random.seed(1234)
for n in range(1, 6):
M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
for scale in np.logspace(-4, 4, 9):
M = M_unscaled * scale
M_logm, info = logm(M, disp=False)
M_round_trip = expm(M_logm)
assert_allclose(M_round_trip, M)
def test_logm_type_preservation_and_conversion(self):
# The logm matrix function should preserve the type of a matrix
# whose eigenvalues are positive with zero imaginary part.
# Test this preservation for variously structured matrices.
complex_dtype_chars = ('F', 'D', 'G')
for matrix_as_list in (
[[1, 0], [0, 1]],
[[1, 0], [1, 1]],
[[2, 1], [1, 1]],
[[2, 3], [1, 2]]):
# check that the spectrum has the expected properties
W = scipy.linalg.eigvals(matrix_as_list)
assert_(not any(w.imag or w.real < 0 for w in W))
# check float type preservation
A = np.array(matrix_as_list, dtype=float)
A_logm, info = logm(A, disp=False)
assert_(A_logm.dtype.char not in complex_dtype_chars)
# check complex type preservation
A = np.array(matrix_as_list, dtype=complex)
A_logm, info = logm(A, disp=False)
assert_(A_logm.dtype.char in complex_dtype_chars)
# check float->complex type conversion for the matrix negation
A = -np.array(matrix_as_list, dtype=float)
A_logm, info = logm(A, disp=False)
assert_(A_logm.dtype.char in complex_dtype_chars)
def test_complex_spectrum_real_logm(self):
# This matrix has complex eigenvalues and real logm.
# Its output dtype depends on its input dtype.
M = [[1, 1, 2], [2, 1, 1], [1, 2, 1]]
for dt in float, complex:
X = np.array(M, dtype=dt)
w = scipy.linalg.eigvals(X)
assert_(1e-2 < np.absolute(w.imag).sum())
Y, info = logm(X, disp=False)
assert_(np.issubdtype(Y.dtype, np.inexact))
assert_allclose(expm(Y), X)
def test_real_mixed_sign_spectrum(self):
# These matrices have real eigenvalues with mixed signs.
# The output logm dtype is complex, regardless of input dtype.
for M in (
[[1, 0], [0, -1]],
[[0, 1], [1, 0]]):
for dt in float, complex:
A = np.array(M, dtype=dt)
A_logm, info = logm(A, disp=False)
assert_(np.issubdtype(A_logm.dtype, np.complexfloating))
def test_exactly_singular(self):
A = np.array([[0, 0], [1j, 1j]])
B = np.asarray([[1, 1], [0, 0]])
for M in A, A.T, B, B.T:
expected_warning = _matfuncs_inv_ssq.LogmExactlySingularWarning
L, info = _assert_warns(expected_warning, logm, M, disp=False)
E = expm(L)
assert_allclose(E, M, atol=1e-14)
def test_nearly_singular(self):
M = np.array([[1e-100]])
expected_warning = _matfuncs_inv_ssq.LogmNearlySingularWarning
L, info = _assert_warns(expected_warning, logm, M, disp=False)
E = expm(L)
assert_allclose(E, M, atol=1e-14)
def test_opposite_sign_complex_eigenvalues(self):
# See gh-6113
E = [[0, 1], [-1, 0]]
L = [[0, np.pi*0.5], [-np.pi*0.5, 0]]
assert_allclose(expm(L), E, atol=1e-14)
assert_allclose(logm(E), L, atol=1e-14)
E = [[1j, 4], [0, -1j]]
L = [[1j*np.pi*0.5, 2*np.pi], [0, -1j*np.pi*0.5]]
assert_allclose(expm(L), E, atol=1e-14)
assert_allclose(logm(E), L, atol=1e-14)
E = [[1j, 0], [0, -1j]]
L = [[1j*np.pi*0.5, 0], [0, -1j*np.pi*0.5]]
assert_allclose(expm(L), E, atol=1e-14)
assert_allclose(logm(E), L, atol=1e-14)
class TestSqrtM(object):
def test_round_trip_random_float(self):
np.random.seed(1234)
for n in range(1, 6):
M_unscaled = np.random.randn(n, n)
for scale in np.logspace(-4, 4, 9):
M = M_unscaled * scale
M_sqrtm, info = sqrtm(M, disp=False)
M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm)
assert_allclose(M_sqrtm_round_trip, M)
def test_round_trip_random_complex(self):
np.random.seed(1234)
for n in range(1, 6):
M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
for scale in np.logspace(-4, 4, 9):
M = M_unscaled * scale
M_sqrtm, info = sqrtm(M, disp=False)
M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm)
assert_allclose(M_sqrtm_round_trip, M)
def test_bad(self):
# See https://web.archive.org/web/20051220232650/http://www.maths.man.ac.uk/~nareports/narep336.ps.gz
e = 2**-5
se = sqrt(e)
a = array([[1.0,0,0,1],
[0,e,0,0],
[0,0,e,0],
[0,0,0,1]])
sa = array([[1,0,0,0.5],
[0,se,0,0],
[0,0,se,0],
[0,0,0,1]])
n = a.shape[0]
assert_array_almost_equal(dot(sa,sa),a)
# Check default sqrtm.
esa = sqrtm(a, disp=False, blocksize=n)[0]
assert_array_almost_equal(dot(esa,esa),a)
# Check sqrtm with 2x2 blocks.
esa = sqrtm(a, disp=False, blocksize=2)[0]
assert_array_almost_equal(dot(esa,esa),a)
def test_sqrtm_type_preservation_and_conversion(self):
# The sqrtm matrix function should preserve the type of a matrix
# whose eigenvalues are nonnegative with zero imaginary part.
# Test this preservation for variously structured matrices.
complex_dtype_chars = ('F', 'D', 'G')
for matrix_as_list in (
[[1, 0], [0, 1]],
[[1, 0], [1, 1]],
[[2, 1], [1, 1]],
[[2, 3], [1, 2]],
[[1, 1], [1, 1]]):
# check that the spectrum has the expected properties
W = scipy.linalg.eigvals(matrix_as_list)
assert_(not any(w.imag or w.real < 0 for w in W))
# check float type preservation
A = np.array(matrix_as_list, dtype=float)
A_sqrtm, info = sqrtm(A, disp=False)
assert_(A_sqrtm.dtype.char not in complex_dtype_chars)
# check complex type preservation
A = np.array(matrix_as_list, dtype=complex)
A_sqrtm, info = sqrtm(A, disp=False)
assert_(A_sqrtm.dtype.char in complex_dtype_chars)
# check float->complex type conversion for the matrix negation
A = -np.array(matrix_as_list, dtype=float)
A_sqrtm, info = sqrtm(A, disp=False)
assert_(A_sqrtm.dtype.char in complex_dtype_chars)
def test_sqrtm_type_conversion_mixed_sign_or_complex_spectrum(self):
complex_dtype_chars = ('F', 'D', 'G')
for matrix_as_list in (
[[1, 0], [0, -1]],
[[0, 1], [1, 0]],
[[0, 1, 0], [0, 0, 1], [1, 0, 0]]):
# check that the spectrum has the expected properties
W = scipy.linalg.eigvals(matrix_as_list)
assert_(any(w.imag or w.real < 0 for w in W))
# check complex->complex
A = np.array(matrix_as_list, dtype=complex)
A_sqrtm, info = sqrtm(A, disp=False)
assert_(A_sqrtm.dtype.char in complex_dtype_chars)
# check float->complex
A = np.array(matrix_as_list, dtype=float)
A_sqrtm, info = sqrtm(A, disp=False)
assert_(A_sqrtm.dtype.char in complex_dtype_chars)
def test_blocksizes(self):
# Make sure I do not goof up the blocksizes when they do not divide n.
np.random.seed(1234)
for n in range(1, 8):
A = np.random.rand(n, n) + 1j*np.random.randn(n, n)
A_sqrtm_default, info = sqrtm(A, disp=False, blocksize=n)
assert_allclose(A, np.linalg.matrix_power(A_sqrtm_default, 2))
for blocksize in range(1, 10):
A_sqrtm_new, info = sqrtm(A, disp=False, blocksize=blocksize)
assert_allclose(A_sqrtm_default, A_sqrtm_new)
def test_al_mohy_higham_2012_experiment_1(self):
# Matrix square root of a tricky upper triangular matrix.
A = _get_al_mohy_higham_2012_experiment_1()
A_sqrtm, info = sqrtm(A, disp=False)
A_round_trip = A_sqrtm.dot(A_sqrtm)
assert_allclose(A_round_trip, A, rtol=1e-5)
assert_allclose(np.tril(A_round_trip), np.tril(A))
def test_strict_upper_triangular(self):
# This matrix has no square root.
for dt in int, float:
A = np.array([
[0, 3, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 3],
[0, 0, 0, 0]], dtype=dt)
A_sqrtm, info = sqrtm(A, disp=False)
assert_(np.isnan(A_sqrtm).all())
def test_weird_matrix(self):
# The square root of matrix B exists.
for dt in int, float:
A = np.array([
[0, 0, 1],
[0, 0, 0],
[0, 1, 0]], dtype=dt)
B = np.array([
[0, 1, 0],
[0, 0, 0],
[0, 0, 0]], dtype=dt)
assert_array_equal(B, A.dot(A))
# But scipy sqrtm is not clever enough to find it.
B_sqrtm, info = sqrtm(B, disp=False)
assert_(np.isnan(B_sqrtm).all())
def test_disp(self):
from io import StringIO
np.random.seed(1234)
A = np.random.rand(3, 3)
B = sqrtm(A, disp=True)
assert_allclose(B.dot(B), A)
def test_opposite_sign_complex_eigenvalues(self):
M = [[2j, 4], [0, -2j]]
R = [[1+1j, 2], [0, 1-1j]]
assert_allclose(np.dot(R, R), M, atol=1e-14)
assert_allclose(sqrtm(M), R, atol=1e-14)
def test_gh4866(self):
M = np.array([[1, 0, 0, 1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 0, 1]])
R = np.array([[sqrt(0.5), 0, 0, sqrt(0.5)],
[0, 0, 0, 0],
[0, 0, 0, 0],
[sqrt(0.5), 0, 0, sqrt(0.5)]])
assert_allclose(np.dot(R, R), M, atol=1e-14)
assert_allclose(sqrtm(M), R, atol=1e-14)
def test_gh5336(self):
M = np.diag([2, 1, 0])
R = np.diag([sqrt(2), 1, 0])
assert_allclose(np.dot(R, R), M, atol=1e-14)
assert_allclose(sqrtm(M), R, atol=1e-14)
def test_gh7839(self):
M = np.zeros((2, 2))
R = np.zeros((2, 2))
assert_allclose(np.dot(R, R), M, atol=1e-14)
assert_allclose(sqrtm(M), R, atol=1e-14)
class TestFractionalMatrixPower(object):
def test_round_trip_random_complex(self):
np.random.seed(1234)
for p in range(1, 5):
for n in range(1, 5):
M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
for scale in np.logspace(-4, 4, 9):
M = M_unscaled * scale
M_root = fractional_matrix_power(M, 1/p)
M_round_trip = np.linalg.matrix_power(M_root, p)
assert_allclose(M_round_trip, M)
def test_round_trip_random_float(self):
        # This test is more annoying because it can hit the branch cut;
        # this happens when the matrix has a real, negative eigenvalue,
        # in which case the principal branch does not exist.
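        # For example (illustrative): a matrix such as np.diag([-1., 1.]) has a
        # real negative eigenvalue, so any square root of it must be complex;
        # fractional_matrix_power is then expected to return a complex result,
        # even though the round trip back to the original matrix still holds.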
np.random.seed(1234)
for p in range(1, 5):
for n in range(1, 5):
M_unscaled = np.random.randn(n, n)
for scale in np.logspace(-4, 4, 9):
M = M_unscaled * scale
M_root = fractional_matrix_power(M, 1/p)
M_round_trip = np.linalg.matrix_power(M_root, p)
assert_allclose(M_round_trip, M)
def test_larger_abs_fractional_matrix_powers(self):
np.random.seed(1234)
for n in (2, 3, 5):
for i in range(10):
M = np.random.randn(n, n) + 1j * np.random.randn(n, n)
M_one_fifth = fractional_matrix_power(M, 0.2)
# Test the round trip.
M_round_trip = np.linalg.matrix_power(M_one_fifth, 5)
assert_allclose(M, M_round_trip)
# Test a large abs fractional power.
X = fractional_matrix_power(M, -5.4)
Y = np.linalg.matrix_power(M_one_fifth, -27)
assert_allclose(X, Y)
# Test another large abs fractional power.
X = fractional_matrix_power(M, 3.8)
Y = np.linalg.matrix_power(M_one_fifth, 19)
assert_allclose(X, Y)
def test_random_matrices_and_powers(self):
# Each independent iteration of this fuzz test picks random parameters.
# It tries to hit some edge cases.
np.random.seed(1234)
nsamples = 20
for i in range(nsamples):
# Sample a matrix size and a random real power.
n = random.randrange(1, 5)
p = np.random.randn()
# Sample a random real or complex matrix.
matrix_scale = np.exp(random.randrange(-4, 5))
A = np.random.randn(n, n)
if random.choice((True, False)):
A = A + 1j * np.random.randn(n, n)
A = A * matrix_scale
# Check a couple of analytically equivalent ways
# to compute the fractional matrix power.
# These can be compared because they both use the principal branch.
A_power = fractional_matrix_power(A, p)
A_logm, info = logm(A, disp=False)
A_power_expm_logm = expm(A_logm * p)
assert_allclose(A_power, A_power_expm_logm)
def test_al_mohy_higham_2012_experiment_1(self):
# Fractional powers of a tricky upper triangular matrix.
A = _get_al_mohy_higham_2012_experiment_1()
# Test remainder matrix power.
A_funm_sqrt, info = funm(A, np.sqrt, disp=False)
A_sqrtm, info = sqrtm(A, disp=False)
A_rem_power = _matfuncs_inv_ssq._remainder_matrix_power(A, 0.5)
A_power = fractional_matrix_power(A, 0.5)
assert_array_equal(A_rem_power, A_power)
assert_allclose(A_sqrtm, A_power)
assert_allclose(A_sqrtm, A_funm_sqrt)
# Test more fractional powers.
for p in (1/2, 5/3):
A_power = fractional_matrix_power(A, p)
A_round_trip = fractional_matrix_power(A_power, 1/p)
assert_allclose(A_round_trip, A, rtol=1e-2)
assert_allclose(np.tril(A_round_trip, 1), np.tril(A, 1))
def test_briggs_helper_function(self):
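        # _briggs_helper_function(a, k) is intended to compute a**(2**-k) - 1
        # while avoiding the subtractive cancellation that the naive formula
        # below suffers from when a**(2**-k) is close to 1; here we only check
        # agreement with the naive formula.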
np.random.seed(1234)
for a in np.random.randn(10) + 1j * np.random.randn(10):
for k in range(5):
x_observed = _matfuncs_inv_ssq._briggs_helper_function(a, k)
x_expected = a ** np.exp2(-k) - 1
assert_allclose(x_observed, x_expected)
def test_type_preservation_and_conversion(self):
# The fractional_matrix_power matrix function should preserve
# the type of a matrix whose eigenvalues
# are positive with zero imaginary part.
# Test this preservation for variously structured matrices.
complex_dtype_chars = ('F', 'D', 'G')
for matrix_as_list in (
[[1, 0], [0, 1]],
[[1, 0], [1, 1]],
[[2, 1], [1, 1]],
[[2, 3], [1, 2]]):
# check that the spectrum has the expected properties
W = scipy.linalg.eigvals(matrix_as_list)
assert_(not any(w.imag or w.real < 0 for w in W))
# Check various positive and negative powers
# with absolute values bigger and smaller than 1.
for p in (-2.4, -0.9, 0.2, 3.3):
# check float type preservation
A = np.array(matrix_as_list, dtype=float)
A_power = fractional_matrix_power(A, p)
assert_(A_power.dtype.char not in complex_dtype_chars)
# check complex type preservation
A = np.array(matrix_as_list, dtype=complex)
A_power = fractional_matrix_power(A, p)
assert_(A_power.dtype.char in complex_dtype_chars)
# check float->complex for the matrix negation
A = -np.array(matrix_as_list, dtype=float)
A_power = fractional_matrix_power(A, p)
assert_(A_power.dtype.char in complex_dtype_chars)
def test_type_conversion_mixed_sign_or_complex_spectrum(self):
complex_dtype_chars = ('F', 'D', 'G')
for matrix_as_list in (
[[1, 0], [0, -1]],
[[0, 1], [1, 0]],
[[0, 1, 0], [0, 0, 1], [1, 0, 0]]):
# check that the spectrum has the expected properties
W = scipy.linalg.eigvals(matrix_as_list)
assert_(any(w.imag or w.real < 0 for w in W))
# Check various positive and negative powers
# with absolute values bigger and smaller than 1.
for p in (-2.4, -0.9, 0.2, 3.3):
# check complex->complex
A = np.array(matrix_as_list, dtype=complex)
A_power = fractional_matrix_power(A, p)
assert_(A_power.dtype.char in complex_dtype_chars)
# check float->complex
A = np.array(matrix_as_list, dtype=float)
A_power = fractional_matrix_power(A, p)
assert_(A_power.dtype.char in complex_dtype_chars)
@pytest.mark.xfail(reason='Too unstable across LAPACKs.')
def test_singular(self):
# Negative fractional powers do not work with singular matrices.
for matrix_as_list in (
[[0, 0], [0, 0]],
[[1, 1], [1, 1]],
[[1, 2], [3, 6]],
[[0, 0, 0], [0, 1, 1], [0, -1, 1]]):
# Check fractional powers both for float and for complex types.
for newtype in (float, complex):
A = np.array(matrix_as_list, dtype=newtype)
for p in (-0.7, -0.9, -2.4, -1.3):
A_power = fractional_matrix_power(A, p)
assert_(np.isnan(A_power).all())
for p in (0.2, 1.43):
A_power = fractional_matrix_power(A, p)
A_round_trip = fractional_matrix_power(A_power, 1/p)
assert_allclose(A_round_trip, A)
def test_opposite_sign_complex_eigenvalues(self):
M = [[2j, 4], [0, -2j]]
R = [[1+1j, 2], [0, 1-1j]]
assert_allclose(np.dot(R, R), M, atol=1e-14)
assert_allclose(fractional_matrix_power(M, 0.5), R, atol=1e-14)
class TestExpM(object):
def test_zero(self):
a = array([[0.,0],[0,0]])
assert_array_almost_equal(expm(a),[[1,0],[0,1]])
def test_single_elt(self):
# See gh-5853
from scipy.sparse import csc_matrix
vOne = -2.02683397006j
vTwo = -2.12817566856j
mOne = csc_matrix([[vOne]], dtype='complex')
mTwo = csc_matrix([[vTwo]], dtype='complex')
outOne = expm(mOne)
outTwo = expm(mTwo)
assert_equal(type(outOne), type(mOne))
assert_equal(type(outTwo), type(mTwo))
assert_allclose(outOne[0, 0], complex(-0.44039415155949196,
-0.8978045395698304))
assert_allclose(outTwo[0, 0], complex(-0.52896401032626006,
-0.84864425749518878))
class TestExpmFrechet(object):
def test_expm_frechet(self):
# a test of the basic functionality
M = np.array([
[1, 2, 3, 4],
[5, 6, 7, 8],
[0, 0, 1, 2],
[0, 0, 5, 6],
], dtype=float)
A = np.array([
[1, 2],
[5, 6],
], dtype=float)
E = np.array([
[3, 4],
[7, 8],
], dtype=float)
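        # M is the block matrix [[A, E], [0, A]]; by a standard identity,
        # expm(M) = [[expm(A), L(A, E)], [0, expm(A)]], where L(A, E) is the
        # Frechet derivative of expm at A in direction E, so the expected
        # derivative can be read off the upper-right block of expm(M).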
expected_expm = scipy.linalg.expm(A)
expected_frechet = scipy.linalg.expm(M)[:2, 2:]
for kwargs in ({}, {'method':'SPS'}, {'method':'blockEnlarge'}):
observed_expm, observed_frechet = expm_frechet(A, E, **kwargs)
assert_allclose(expected_expm, observed_expm)
assert_allclose(expected_frechet, observed_frechet)
def test_small_norm_expm_frechet(self):
# methodically test matrices with a range of norms, for better coverage
M_original = np.array([
[1, 2, 3, 4],
[5, 6, 7, 8],
[0, 0, 1, 2],
[0, 0, 5, 6],
], dtype=float)
A_original = np.array([
[1, 2],
[5, 6],
], dtype=float)
E_original = np.array([
[3, 4],
[7, 8],
], dtype=float)
A_original_norm_1 = scipy.linalg.norm(A_original, 1)
selected_m_list = [1, 3, 5, 7, 9, 11, 13, 15]
m_neighbor_pairs = zip(selected_m_list[:-1], selected_m_list[1:])
for ma, mb in m_neighbor_pairs:
ell_a = scipy.linalg._expm_frechet.ell_table_61[ma]
ell_b = scipy.linalg._expm_frechet.ell_table_61[mb]
target_norm_1 = 0.5 * (ell_a + ell_b)
scale = target_norm_1 / A_original_norm_1
M = scale * M_original
A = scale * A_original
E = scale * E_original
expected_expm = scipy.linalg.expm(A)
expected_frechet = scipy.linalg.expm(M)[:2, 2:]
observed_expm, observed_frechet = expm_frechet(A, E)
assert_allclose(expected_expm, observed_expm)
assert_allclose(expected_frechet, observed_frechet)
def test_fuzz(self):
# try a bunch of crazy inputs
rfuncs = (
np.random.uniform,
np.random.normal,
np.random.standard_cauchy,
np.random.exponential)
ntests = 100
for i in range(ntests):
rfunc = random.choice(rfuncs)
target_norm_1 = random.expovariate(1.0)
n = random.randrange(2, 16)
A_original = rfunc(size=(n,n))
E_original = rfunc(size=(n,n))
A_original_norm_1 = scipy.linalg.norm(A_original, 1)
scale = target_norm_1 / A_original_norm_1
A = scale * A_original
E = scale * E_original
M = np.vstack([
np.hstack([A, E]),
np.hstack([np.zeros_like(A), A])])
expected_expm = scipy.linalg.expm(A)
expected_frechet = scipy.linalg.expm(M)[:n, n:]
observed_expm, observed_frechet = expm_frechet(A, E)
assert_allclose(expected_expm, observed_expm)
assert_allclose(expected_frechet, observed_frechet)
def test_problematic_matrix(self):
# this test case uncovered a bug which has since been fixed
A = np.array([
[1.50591997, 1.93537998],
[0.41203263, 0.23443516],
], dtype=float)
E = np.array([
[1.87864034, 2.07055038],
[1.34102727, 0.67341123],
], dtype=float)
A_norm_1 = scipy.linalg.norm(A, 1)
sps_expm, sps_frechet = expm_frechet(
A, E, method='SPS')
blockEnlarge_expm, blockEnlarge_frechet = expm_frechet(
A, E, method='blockEnlarge')
assert_allclose(sps_expm, blockEnlarge_expm)
assert_allclose(sps_frechet, blockEnlarge_frechet)
@pytest.mark.slow
@pytest.mark.skip(reason='this test is deliberately slow')
def test_medium_matrix(self):
# profile this to see the speed difference
n = 1000
A = np.random.exponential(size=(n, n))
E = np.random.exponential(size=(n, n))
sps_expm, sps_frechet = expm_frechet(
A, E, method='SPS')
blockEnlarge_expm, blockEnlarge_frechet = expm_frechet(
A, E, method='blockEnlarge')
assert_allclose(sps_expm, blockEnlarge_expm)
assert_allclose(sps_frechet, blockEnlarge_frechet)
def _help_expm_cond_search(A, A_norm, X, X_norm, eps, p):
p = np.reshape(p, A.shape)
p_norm = norm(p)
perturbation = eps * p * (A_norm / p_norm)
X_prime = expm(A + perturbation)
scaled_relative_error = norm(X_prime - X) / (X_norm * eps)
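    # Negate so that scipy.optimize.minimize, used in test_expm_cond_fuzz
    # below, effectively searches for the perturbation direction that
    # maximizes the scaled relative error.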
return -scaled_relative_error
def _normalized_like(A, B):
return A * (scipy.linalg.norm(B) / scipy.linalg.norm(A))
def _relative_error(f, A, perturbation):
X = f(A)
X_prime = f(A + perturbation)
return norm(X_prime - X) / norm(X)
class TestExpmConditionNumber(object):
def test_expm_cond_smoke(self):
np.random.seed(1234)
for n in range(1, 4):
A = np.random.randn(n, n)
kappa = expm_cond(A)
assert_array_less(0, kappa)
def test_expm_bad_condition_number(self):
A = np.array([
[-1.128679820, 9.614183771e4, -4.524855739e9, 2.924969411e14],
[0, -1.201010529, 9.634696872e4, -4.681048289e9],
[0, 0, -1.132893222, 9.532491830e4],
[0, 0, 0, -1.179475332],
])
kappa = expm_cond(A)
assert_array_less(1e36, kappa)
def test_univariate(self):
np.random.seed(12345)
for x in np.linspace(-5, 5, num=11):
A = np.array([[x]])
assert_allclose(expm_cond(A), abs(x))
for x in np.logspace(-2, 2, num=11):
A = np.array([[x]])
assert_allclose(expm_cond(A), abs(x))
for i in range(10):
A = np.random.randn(1, 1)
assert_allclose(expm_cond(A), np.absolute(A)[0, 0])
@pytest.mark.slow
def test_expm_cond_fuzz(self):
np.random.seed(12345)
eps = 1e-5
nsamples = 10
for i in range(nsamples):
n = np.random.randint(2, 5)
A = np.random.randn(n, n)
A_norm = scipy.linalg.norm(A)
X = expm(A)
X_norm = scipy.linalg.norm(X)
kappa = expm_cond(A)
# Look for the small perturbation that gives the greatest
# relative error.
f = functools.partial(_help_expm_cond_search,
A, A_norm, X, X_norm, eps)
guess = np.ones(n*n)
out = minimize(f, guess, method='L-BFGS-B')
xopt = out.x
yopt = f(xopt)
p_best = eps * _normalized_like(np.reshape(xopt, A.shape), A)
p_best_relerr = _relative_error(expm, A, p_best)
assert_allclose(p_best_relerr, -yopt * eps)
# Check that the identified perturbation indeed gives greater
# relative error than random perturbations with similar norms.
for j in range(5):
p_rand = eps * _normalized_like(np.random.randn(*A.shape), A)
assert_allclose(norm(p_best), norm(p_rand))
p_rand_relerr = _relative_error(expm, A, p_rand)
assert_array_less(p_rand_relerr, p_best_relerr)
# The greatest relative error should not be much greater than
# eps times the condition number kappa.
# In the limit as eps approaches zero it should never be greater.
assert_array_less(p_best_relerr, (1 + 2*eps) * eps * kappa)
|
|
import threading
import time
import traceback
import weakref
from collections import deque
import pandas as pd
import ibis.common.exceptions as com
import ibis.expr.schema as sch
import ibis.expr.types as ir
import ibis.util as util
from ibis.backends.base import Database
from ibis.backends.base.sql.compiler import DDL, DML
from ibis.backends.base.sql.ddl import (
AlterTable,
InsertSelect,
RenameTable,
fully_qualified_re,
)
from . import ddl
from .compat import HS2Error, impyla
class ImpalaDatabase(Database):
def create_table(self, table_name, obj=None, **kwargs):
"""
        Dispatch to ImpalaClient.create_table. See that function's docstring
        for more details.
"""
return self.client.create_table(
table_name, obj=obj, database=self.name, **kwargs
)
def list_udfs(self, like=None):
return self.client.list_udfs(like=like, database=self.name)
def list_udas(self, like=None):
return self.client.list_udas(like=like, database=self.name)
class ImpalaConnection:
"""
Database connection wrapper
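    Illustrative usage (host and port below are placeholders; keyword
    arguments are passed through to impyla.connect):
    >>> con = ImpalaConnection(host='impala-host', port=21050)  # doctest: +SKIP
    >>> cursor = con.execute('SELECT 1')  # doctest: +SKIP
    >>> rows = cursor.fetchall()  # doctest: +SKIP
    >>> cursor.release()  # doctest: +SKIP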
"""
def __init__(self, pool_size=8, database='default', **params):
self.params = params
self.database = database
self.lock = threading.Lock()
self.options = {}
self.max_pool_size = pool_size
self._connections = weakref.WeakSet()
self.connection_pool = deque(maxlen=pool_size)
with self.lock:
self.connection_pool_size = 0
def set_options(self, options):
self.options.update(options)
def close(self):
"""
Close all open Impyla sessions
"""
for impyla_connection in self._connections:
impyla_connection.close()
self._connections.clear()
self.connection_pool.clear()
def set_database(self, name):
self.database = name
def disable_codegen(self, disabled=True):
key = 'DISABLE_CODEGEN'
if disabled:
self.options[key] = '1'
elif key in self.options:
del self.options[key]
def execute(self, query):
if isinstance(query, (DDL, DML)):
query = query.compile()
cursor = self._get_cursor()
util.log(query)
try:
cursor.execute(query)
except Exception:
cursor.release()
util.log(
'Exception caused by {}: {}'.format(
query, traceback.format_exc()
)
)
raise
return cursor
def fetchall(self, query):
with self.execute(query) as cur:
results = cur.fetchall()
return results
def _get_cursor(self):
try:
cursor = self.connection_pool.popleft()
except IndexError: # deque is empty
with self.lock:
# NB: Do not put a lock around the entire if statement.
# This will cause a deadlock because _new_cursor calls the
# ImpalaCursor constructor which takes a lock to increment the
# connection pool size.
connection_pool_size = self.connection_pool_size
if connection_pool_size < self.max_pool_size:
return self._new_cursor()
raise com.InternalError('Too many concurrent / hung queries')
else:
if (
cursor.database != self.database
or cursor.options != self.options
):
return self._new_cursor()
cursor.released = False
return cursor
def _new_cursor(self):
params = self.params.copy()
con = impyla.connect(database=self.database, **params)
self._connections.add(con)
# make sure the connection works
cursor = con.cursor(user=params.get('user'), convert_types=True)
cursor.ping()
wrapper = ImpalaCursor(
cursor, self, con, self.database, self.options.copy()
)
wrapper.set_options()
return wrapper
def ping(self):
self._get_cursor()._cursor.ping()
def release(self, cur):
self.connection_pool.append(cur)
class ImpalaCursor:
def __init__(self, cursor, con, impyla_con, database, options):
self._cursor = cursor
self.con = con
self.impyla_con = impyla_con
self.database = database
self.options = options
self.released = False
with self.con.lock:
self.con.connection_pool_size += 1
def __del__(self):
try:
self._close_cursor()
except Exception:
pass
with self.con.lock:
self.con.connection_pool_size -= 1
def _close_cursor(self):
try:
self._cursor.close()
except HS2Error as e:
# connection was closed elsewhere
already_closed_messages = [
'invalid query handle',
'invalid session',
]
for message in already_closed_messages:
if message in e.args[0].lower():
break
else:
raise
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.release()
def set_options(self):
for k, v in self.options.items():
query = f'SET {k} = {v!r}'
self._cursor.execute(query)
@property
def description(self):
return self._cursor.description
def release(self):
if not self.released:
self.con.release(self)
self.released = True
def execute(self, stmt):
self._cursor.execute_async(stmt)
self._wait_synchronous()
def _wait_synchronous(self):
# Wait to finish, but cancel if KeyboardInterrupt
from impala.hiveserver2 import OperationalError
loop_start = time.time()
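        # Poll the operation state with a progressively longer sleep so that
        # short queries return quickly while long-running queries do not
        # hammer the server with status requests.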
def _sleep_interval(start_time):
elapsed = time.time() - start_time
if elapsed < 0.05:
return 0.01
elif elapsed < 1.0:
return 0.05
elif elapsed < 10.0:
return 0.1
elif elapsed < 60.0:
return 0.5
return 1.0
cur = self._cursor
try:
while True:
state = cur.status()
if self._cursor._op_state_is_error(state):
raise OperationalError("Operation is in ERROR_STATE")
if not cur._op_state_is_executing(state):
break
time.sleep(_sleep_interval(loop_start))
except KeyboardInterrupt:
util.log('Canceling query')
self.cancel()
raise
def is_finished(self):
return not self.is_executing()
def is_executing(self):
return self._cursor.is_executing()
def cancel(self):
self._cursor.cancel_operation()
def fetchone(self):
return self._cursor.fetchone()
def fetchall(self, columnar=False):
if columnar:
return self._cursor.fetchcolumnar()
else:
return self._cursor.fetchall()
class ImpalaTable(ir.TableExpr):
"""A physical table in the Impala-Hive metastore"""
@property
def _qualified_name(self):
return self.op().args[0]
@property
def _unqualified_name(self):
return self._match_name()[1]
@property
def _client(self):
return self.op().source
def _match_name(self):
m = fully_qualified_re.match(self._qualified_name)
if not m:
raise com.IbisError(
'Cannot determine database name from {}'.format(
self._qualified_name
)
)
db, quoted, unquoted = m.groups()
return db, quoted or unquoted
@property
def _database(self):
return self._match_name()[0]
def compute_stats(self, incremental=False):
"""Invoke Impala COMPUTE STATS command on the table."""
return self._client.compute_stats(
self._qualified_name, incremental=incremental
)
def invalidate_metadata(self):
self._client.invalidate_metadata(self._qualified_name)
def refresh(self):
self._client.refresh(self._qualified_name)
def metadata(self):
"""Return results of `DESCRIBE FORMATTED` statement."""
return self._client.describe_formatted(self._qualified_name)
describe_formatted = metadata
def files(self):
"""Return results of SHOW FILES statement."""
return self._client.show_files(self._qualified_name)
def drop(self):
"""Drop the table from the database."""
self._client.drop_table_or_view(self._qualified_name)
def truncate(self):
self._client.truncate_table(self._qualified_name)
def insert(
self,
obj=None,
overwrite=False,
partition=None,
values=None,
validate=True,
):
"""Insert into an Impala table.
Parameters
----------
obj
Table expression or DataFrame
overwrite
If True, will replace existing contents of table
partition
For partitioned tables, indicate the partition that's being
inserted into, either with an ordered list of partition keys or a
dict of partition field name to value. For example for the
partition (year=2007, month=7), this can be either (2007, 7) or
{'year': 2007, 'month': 7}.
validate
            If True, perform more rigorous validation that the schema of the
            data being inserted is compatible with the schema of the existing table
Examples
--------
>>> t.insert(table_expr) # doctest: +SKIP
Completely overwrite contents
>>> t.insert(table_expr, overwrite=True) # doctest: +SKIP
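        Insert into a specific partition (keys may also be given as an ordered tuple)
        >>> t.insert(table_expr, partition={'year': 2007, 'month': 7})  # doctest: +SKIP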
"""
if values is not None:
raise NotImplementedError
with self._client._setup_insert(obj) as expr:
if validate:
existing_schema = self.schema()
insert_schema = expr.schema()
if not insert_schema.equals(existing_schema):
_validate_compatible(insert_schema, existing_schema)
if partition is not None:
partition_schema = self.partition_schema()
partition_schema_names = frozenset(partition_schema.names)
expr = expr.projection(
[
column
for column in expr.columns
if column not in partition_schema_names
]
)
else:
partition_schema = None
ast = self._client.compiler.to_ast(expr)
select = ast.queries[0]
statement = InsertSelect(
self._qualified_name,
select,
partition=partition,
partition_schema=partition_schema,
overwrite=overwrite,
)
return self._client.raw_sql(statement.compile())
def load_data(self, path, overwrite=False, partition=None):
"""Load data into an Impala table.
Parameters
----------
path
Data to load
overwrite
Overwrite the existing data in the entire table or indicated
partition
partition
If specified, the partition must already exist
"""
if partition is not None:
partition_schema = self.partition_schema()
else:
partition_schema = None
stmt = ddl.LoadData(
self._qualified_name,
path,
partition=partition,
partition_schema=partition_schema,
)
return self._client.raw_sql(stmt.compile())
@property
def name(self):
return self.op().name
def rename(self, new_name, database=None):
"""Rename table inside Impala.
References to the old table are no longer valid.
"""
m = fully_qualified_re.match(new_name)
if not m and database is None:
database = self._database
statement = RenameTable(
self._qualified_name, new_name, new_database=database
)
self._client.raw_sql(statement)
op = self.op().change_name(statement.new_qualified_name)
return type(self)(op)
@property
def is_partitioned(self):
"""True if the table is partitioned."""
return self.metadata().is_partitioned
def partition_schema(self):
"""Return the schema for the partition columns."""
schema = self.schema()
name_to_type = dict(zip(schema.names, schema.types))
result = self.partitions()
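        # The listing from partitions() starts with the partition key columns;
        # later columns (e.g. row-count and size summaries) are not part of the
        # table schema, so stop collecting at the first unrecognized column.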
partition_fields = []
for x in result.columns:
if x not in name_to_type:
break
partition_fields.append((x, name_to_type[x]))
pnames, ptypes = zip(*partition_fields)
return sch.Schema(pnames, ptypes)
def add_partition(self, spec, location=None):
"""Add a new table partition.
This API creates any necessary new directories in HDFS.
Partition parameters can be set in a single DDL statement or you can
use `alter_partition` to set them after the fact.
"""
part_schema = self.partition_schema()
stmt = ddl.AddPartition(
self._qualified_name, spec, part_schema, location=location
)
return self._client.raw_sql(stmt)
def alter(
self,
location=None,
format=None,
tbl_properties=None,
serde_properties=None,
):
"""Change settings and parameters of the table.
Parameters
----------
location
            New storage location for the table. For partitioned tables, you may
            want the alter_partition method instead
format
Table format
tbl_properties
Table properties
serde_properties
Serialization/deserialization properties
"""
def _run_ddl(**kwds):
stmt = AlterTable(self._qualified_name, **kwds)
return self._client.raw_sql(stmt)
return self._alter_table_helper(
_run_ddl,
location=location,
format=format,
tbl_properties=tbl_properties,
serde_properties=serde_properties,
)
def set_external(self, is_external=True):
"""Toggle the `EXTERNAL` table property."""
self.alter(tbl_properties={'EXTERNAL': is_external})
def alter_partition(
self,
spec,
location=None,
format=None,
tbl_properties=None,
serde_properties=None,
):
"""Change settings and parameters of an existing partition.
Parameters
----------
spec
The partition keys for the partition being modified
location
Location of the partition
format
Table format
tbl_properties
Table properties
serde_properties
Serialization/deserialization properties
"""
part_schema = self.partition_schema()
def _run_ddl(**kwds):
stmt = ddl.AlterPartition(
self._qualified_name, spec, part_schema, **kwds
)
return self._client.raw_sql(stmt)
return self._alter_table_helper(
_run_ddl,
location=location,
format=format,
tbl_properties=tbl_properties,
serde_properties=serde_properties,
)
def _alter_table_helper(self, f, **alterations):
results = []
for k, v in alterations.items():
if v is None:
continue
result = f(**{k: v})
results.append(result)
return results
def drop_partition(self, spec):
"""Drop an existing table partition."""
part_schema = self.partition_schema()
stmt = ddl.DropPartition(self._qualified_name, spec, part_schema)
return self._client.raw_sql(stmt)
def partitions(self):
"""Return information about the table's partitions.
Raises an exception if the table is not partitioned.
"""
return self._client.list_partitions(self._qualified_name)
def stats(self) -> pd.DataFrame:
"""Return results of `SHOW TABLE STATS`.
If not partitioned, contains only one row.
Returns
-------
DataFrame
Table statistics
"""
return self._client.table_stats(self._qualified_name)
def column_stats(self) -> pd.DataFrame:
"""Return results of `SHOW COLUMN STATS`.
Returns
-------
DataFrame
Column statistics
"""
return self._client.column_stats(self._qualified_name)
# ----------------------------------------------------------------------
# ORM-ish usability layer
class ScalarFunction:
def drop(self):
pass
class AggregateFunction:
def drop(self):
pass
def _validate_compatible(from_schema, to_schema):
if set(from_schema.names) != set(to_schema.names):
raise com.IbisInputError('Schemas have different names')
for name in from_schema:
lt = from_schema[name]
rt = to_schema[name]
if not lt.castable(rt):
raise com.IbisInputError(f'Cannot safely cast {lt!r} to {rt!r}')
|
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import mock
import json
from tests import base
from tests.base import BaseCLITestCase
from st2client.utils import httpclient
from st2client.commands.action import LIVEACTION_STATUS_RUNNING
from st2client.commands.action import LIVEACTION_STATUS_SUCCEEDED
from st2client.commands.action import LIVEACTION_STATUS_FAILED
from st2client.commands.action import LIVEACTION_STATUS_TIMED_OUT
from st2client.shell import Shell
__all__ = ["ActionExecutionTailCommandTestCase"]
# Mock objects
MOCK_LIVEACTION_1_RUNNING = {"id": "idfoo1", "status": LIVEACTION_STATUS_RUNNING}
MOCK_LIVEACTION_1_SUCCEEDED = {"id": "idfoo1", "status": LIVEACTION_STATUS_SUCCEEDED}
MOCK_LIVEACTION_2_FAILED = {"id": "idfoo2", "status": LIVEACTION_STATUS_FAILED}
# Mock liveaction objects for ActionChain workflow
MOCK_LIVEACTION_3_RUNNING = {"id": "idfoo3", "status": LIVEACTION_STATUS_RUNNING}
MOCK_LIVEACTION_3_CHILD_1_RUNNING = {
"id": "idchild1",
"context": {"parent": {"execution_id": "idfoo3"}, "chain": {"name": "task_1"}},
"status": LIVEACTION_STATUS_RUNNING,
}
MOCK_LIVEACTION_3_CHILD_1_SUCCEEDED = {
"id": "idchild1",
"context": {"parent": {"execution_id": "idfoo3"}, "chain": {"name": "task_1"}},
"status": LIVEACTION_STATUS_SUCCEEDED,
}
MOCK_LIVEACTION_3_CHILD_1_OUTPUT_1 = {
"execution_id": "idchild1",
"timestamp": "1505732598",
"output_type": "stdout",
"data": "line ac 4\n",
}
MOCK_LIVEACTION_3_CHILD_1_OUTPUT_2 = {
"execution_id": "idchild1",
"timestamp": "1505732598",
"output_type": "stderr",
"data": "line ac 5\n",
}
MOCK_LIVEACTION_3_CHILD_2_RUNNING = {
"id": "idchild2",
"context": {"parent": {"execution_id": "idfoo3"}, "chain": {"name": "task_2"}},
"status": LIVEACTION_STATUS_RUNNING,
}
MOCK_LIVEACTION_3_CHILD_2_FAILED = {
"id": "idchild2",
"context": {"parent": {"execution_id": "idfoo3"}, "chain": {"name": "task_2"}},
"status": LIVEACTION_STATUS_FAILED,
}
MOCK_LIVEACTION_3_CHILD_2_OUTPUT_1 = {
"execution_id": "idchild2",
"timestamp": "1505732598",
"output_type": "stdout",
"data": "line ac 100\n",
}
MOCK_LIVEACTION_3_SUCCEDED = {"id": "idfoo3", "status": LIVEACTION_STATUS_SUCCEEDED}
# Mock objects for Orquesta workflow execution
MOCK_LIVEACTION_4_RUNNING = {"id": "idfoo4", "status": LIVEACTION_STATUS_RUNNING}
MOCK_LIVEACTION_4_CHILD_1_RUNNING = {
"id": "idorquestachild1",
"context": {
"orquesta": {"task_name": "task_1"},
"parent": {"execution_id": "idfoo4"},
},
"status": LIVEACTION_STATUS_RUNNING,
}
MOCK_LIVEACTION_4_CHILD_1_1_RUNNING = {
"id": "idorquestachild1_1",
"context": {
"orquesta": {"task_name": "task_1"},
"parent": {"execution_id": "idorquestachild1"},
},
"status": LIVEACTION_STATUS_RUNNING,
}
MOCK_LIVEACTION_4_CHILD_1_SUCCEEDED = {
"id": "idorquestachild1",
"context": {
"orquesta": {
"task_name": "task_1",
},
"parent": {"execution_id": "idfoo4"},
},
"status": LIVEACTION_STATUS_SUCCEEDED,
}
MOCK_LIVEACTION_4_CHILD_1_1_SUCCEEDED = {
"id": "idorquestachild1_1",
"context": {
"orquesta": {
"task_name": "task_1",
},
"parent": {"execution_id": "idorquestachild1"},
},
"status": LIVEACTION_STATUS_SUCCEEDED,
}
MOCK_LIVEACTION_4_CHILD_1_OUTPUT_1 = {
"execution_id": "idorquestachild1",
"timestamp": "1505732598",
"output_type": "stdout",
"data": "line orquesta 4\n",
}
MOCK_LIVEACTION_4_CHILD_1_OUTPUT_2 = {
"execution_id": "idorquestachild1",
"timestamp": "1505732598",
"output_type": "stderr",
"data": "line orquesta 5\n",
}
MOCK_LIVEACTION_4_CHILD_1_1_OUTPUT_1 = {
"execution_id": "idorquestachild1_1",
"timestamp": "1505732598",
"output_type": "stdout",
"data": "line orquesta 4\n",
}
MOCK_LIVEACTION_4_CHILD_1_1_OUTPUT_2 = {
"execution_id": "idorquestachild1_1",
"timestamp": "1505732598",
"output_type": "stderr",
"data": "line orquesta 5\n",
}
MOCK_LIVEACTION_4_CHILD_2_RUNNING = {
"id": "idorquestachild2",
"context": {
"orquesta": {
"task_name": "task_2",
},
"parent": {"execution_id": "idfoo4"},
},
"status": LIVEACTION_STATUS_RUNNING,
}
MOCK_LIVEACTION_4_CHILD_2_TIMED_OUT = {
"id": "idorquestachild2",
"context": {
"orquesta": {
"task_name": "task_2",
},
"parent": {"execution_id": "idfoo4"},
},
"status": LIVEACTION_STATUS_TIMED_OUT,
}
MOCK_LIVEACTION_4_CHILD_2_OUTPUT_1 = {
"execution_id": "idorquestachild2",
"timestamp": "1505732598",
"output_type": "stdout",
"data": "line orquesta 100\n",
}
MOCK_LIVEACTION_4_SUCCEDED = {"id": "idfoo4", "status": LIVEACTION_STATUS_SUCCEEDED}
# Mock objects for simple actions
MOCK_OUTPUT_1 = {
"execution_id": "idfoo3",
"timestamp": "1505732598",
"output_type": "stdout",
"data": "line 1\n",
}
MOCK_OUTPUT_2 = {
"execution_id": "idfoo3",
"timestamp": "1505732598",
"output_type": "stderr",
"data": "line 2\n",
}
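# The tail command consumes a single event stream in which execution status
# updates and stdout/stderr output records are interleaved. The test cases
# below feed such streams through a mocked StreamManager and assert on the
# rendered output; output records are matched to the tailed execution (or its
# child executions) via their execution_id and parent context.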
class ActionExecutionTailCommandTestCase(BaseCLITestCase):
capture_output = True
def __init__(self, *args, **kwargs):
super(ActionExecutionTailCommandTestCase, self).__init__(*args, **kwargs)
self.shell = Shell()
@mock.patch.object(
httpclient.HTTPClient,
"get",
mock.MagicMock(
return_value=base.FakeResponse(
json.dumps(MOCK_LIVEACTION_1_SUCCEEDED), 200, "OK"
)
),
)
def test_tail_simple_execution_already_finished_succeeded(self):
argv = ["execution", "tail", "idfoo1"]
self.assertEqual(self.shell.run(argv), 0)
stdout = self.stdout.getvalue()
stderr = self.stderr.getvalue()
self.assertIn("Execution idfoo1 has completed (status=succeeded)", stdout)
self.assertEqual(stderr, "")
@mock.patch.object(
httpclient.HTTPClient,
"get",
mock.MagicMock(
return_value=base.FakeResponse(
json.dumps(MOCK_LIVEACTION_2_FAILED), 200, "OK"
)
),
)
def test_tail_simple_execution_already_finished_failed(self):
argv = ["execution", "tail", "idfoo2"]
self.assertEqual(self.shell.run(argv), 0)
stdout = self.stdout.getvalue()
stderr = self.stderr.getvalue()
self.assertIn("Execution idfoo2 has completed (status=failed)", stdout)
self.assertEqual(stderr, "")
@mock.patch.object(
httpclient.HTTPClient,
"get",
mock.MagicMock(
return_value=base.FakeResponse(
json.dumps(MOCK_LIVEACTION_1_RUNNING), 200, "OK"
)
),
)
@mock.patch("st2client.client.StreamManager", autospec=True)
def test_tail_simple_execution_running_no_data_produced(self, mock_stream_manager):
argv = ["execution", "tail", "idfoo1"]
MOCK_EVENTS = [MOCK_LIVEACTION_1_SUCCEEDED]
mock_cls = mock.Mock()
mock_cls.listen = mock.Mock()
mock_listen_generator = mock.Mock()
mock_listen_generator.return_value = MOCK_EVENTS
mock_cls.listen.side_effect = mock_listen_generator
mock_stream_manager.return_value = mock_cls
self.assertEqual(self.shell.run(argv), 0)
self.assertEqual(mock_listen_generator.call_count, 1)
stdout = self.stdout.getvalue()
stderr = self.stderr.getvalue()
expected_result = """
Execution idfoo1 has completed (status=succeeded).
"""
self.assertEqual(stdout, expected_result)
self.assertEqual(stderr, "")
@mock.patch.object(
httpclient.HTTPClient,
"get",
mock.MagicMock(
return_value=base.FakeResponse(
json.dumps(MOCK_LIVEACTION_3_RUNNING), 200, "OK"
)
),
)
@mock.patch("st2client.client.StreamManager", autospec=True)
def test_tail_simple_execution_running_with_data(self, mock_stream_manager):
argv = ["execution", "tail", "idfoo3"]
MOCK_EVENTS = [
MOCK_LIVEACTION_3_RUNNING,
MOCK_OUTPUT_1,
MOCK_OUTPUT_2,
MOCK_LIVEACTION_3_SUCCEDED,
]
mock_cls = mock.Mock()
mock_cls.listen = mock.Mock()
mock_listen_generator = mock.Mock()
mock_listen_generator.return_value = MOCK_EVENTS
mock_cls.listen.side_effect = mock_listen_generator
mock_stream_manager.return_value = mock_cls
self.assertEqual(self.shell.run(argv), 0)
self.assertEqual(mock_listen_generator.call_count, 1)
stdout = self.stdout.getvalue()
stderr = self.stderr.getvalue()
expected_result = """
Execution idfoo3 has started.
line 1
line 2
Execution idfoo3 has completed (status=succeeded).
""".lstrip()
self.assertEqual(stdout, expected_result)
self.assertEqual(stderr, "")
@mock.patch.object(
httpclient.HTTPClient,
"get",
mock.MagicMock(
return_value=base.FakeResponse(
json.dumps(MOCK_LIVEACTION_3_RUNNING), 200, "OK"
)
),
)
@mock.patch("st2client.client.StreamManager", autospec=True)
def test_tail_action_chain_workflow_execution(self, mock_stream_manager):
argv = ["execution", "tail", "idfoo3"]
MOCK_EVENTS = [
# Workflow started running
MOCK_LIVEACTION_3_RUNNING,
# Child task 1 started running
MOCK_LIVEACTION_3_CHILD_1_RUNNING,
# Output produced by the child task
MOCK_LIVEACTION_3_CHILD_1_OUTPUT_1,
MOCK_LIVEACTION_3_CHILD_1_OUTPUT_2,
# Child task 1 finished
MOCK_LIVEACTION_3_CHILD_1_SUCCEEDED,
# Child task 2 started running
MOCK_LIVEACTION_3_CHILD_2_RUNNING,
# Output produced by child task
MOCK_LIVEACTION_3_CHILD_2_OUTPUT_1,
# Child task 2 finished
MOCK_LIVEACTION_3_CHILD_2_FAILED,
# Parent workflow task finished
MOCK_LIVEACTION_3_SUCCEDED,
]
mock_cls = mock.Mock()
mock_cls.listen = mock.Mock()
mock_listen_generator = mock.Mock()
mock_listen_generator.return_value = MOCK_EVENTS
mock_cls.listen.side_effect = mock_listen_generator
mock_stream_manager.return_value = mock_cls
self.assertEqual(self.shell.run(argv), 0)
self.assertEqual(mock_listen_generator.call_count, 1)
stdout = self.stdout.getvalue()
stderr = self.stderr.getvalue()
expected_result = """
Execution idfoo3 has started.
Child execution (task=task_1) idchild1 has started.
line ac 4
line ac 5
Child execution (task=task_1) idchild1 has finished (status=succeeded).
Child execution (task=task_2) idchild2 has started.
line ac 100
Child execution (task=task_2) idchild2 has finished (status=failed).
Execution idfoo3 has completed (status=succeeded).
""".lstrip()
self.assertEqual(stdout, expected_result)
self.assertEqual(stderr, "")
@mock.patch.object(
httpclient.HTTPClient,
"get",
mock.MagicMock(
return_value=base.FakeResponse(
json.dumps(MOCK_LIVEACTION_4_RUNNING), 200, "OK"
)
),
)
@mock.patch("st2client.client.StreamManager", autospec=True)
def test_tail_orquesta_workflow_execution(self, mock_stream_manager):
argv = ["execution", "tail", "idfoo4"]
MOCK_EVENTS = [
# Workflow started running
MOCK_LIVEACTION_4_RUNNING,
# Child task 1 started running
MOCK_LIVEACTION_4_CHILD_1_RUNNING,
# Output produced by the child task
MOCK_LIVEACTION_4_CHILD_1_OUTPUT_1,
MOCK_LIVEACTION_4_CHILD_1_OUTPUT_2,
# Child task 1 finished
MOCK_LIVEACTION_4_CHILD_1_SUCCEEDED,
# Child task 2 started running
MOCK_LIVEACTION_4_CHILD_2_RUNNING,
# Output produced by child task
MOCK_LIVEACTION_4_CHILD_2_OUTPUT_1,
# Child task 2 finished
MOCK_LIVEACTION_4_CHILD_2_TIMED_OUT,
# Parent workflow task finished
MOCK_LIVEACTION_4_SUCCEDED,
]
mock_cls = mock.Mock()
mock_cls.listen = mock.Mock()
mock_listen_generator = mock.Mock()
mock_listen_generator.return_value = MOCK_EVENTS
mock_cls.listen.side_effect = mock_listen_generator
mock_stream_manager.return_value = mock_cls
self.assertEqual(self.shell.run(argv), 0)
self.assertEqual(mock_listen_generator.call_count, 1)
stdout = self.stdout.getvalue()
stderr = self.stderr.getvalue()
expected_result = """
Execution idfoo4 has started.
Child execution (task=task_1) idorquestachild1 has started.
line orquesta 4
line orquesta 5
Child execution (task=task_1) idorquestachild1 has finished (status=succeeded).
Child execution (task=task_2) idorquestachild2 has started.
line orquesta 100
Child execution (task=task_2) idorquestachild2 has finished (status=timeout).
Execution idfoo4 has completed (status=succeeded).
""".lstrip()
self.assertEqual(stdout, expected_result)
self.assertEqual(stderr, "")
@mock.patch.object(
httpclient.HTTPClient,
"get",
mock.MagicMock(
return_value=base.FakeResponse(
json.dumps(MOCK_LIVEACTION_4_RUNNING), 200, "OK"
)
),
)
@mock.patch("st2client.client.StreamManager", autospec=True)
def test_tail_double_nested_orquesta_workflow_execution(self, mock_stream_manager):
argv = ["execution", "tail", "idfoo4"]
MOCK_EVENTS = [
# Workflow started running
MOCK_LIVEACTION_4_RUNNING,
# Child task 1 started running (sub workflow)
MOCK_LIVEACTION_4_CHILD_1_RUNNING,
# Child task 1 started running
MOCK_LIVEACTION_4_CHILD_1_1_RUNNING,
# Output produced by the child task
MOCK_LIVEACTION_4_CHILD_1_1_OUTPUT_1,
MOCK_LIVEACTION_4_CHILD_1_1_OUTPUT_2,
# Another execution has started; its output should not be included
MOCK_LIVEACTION_3_RUNNING,
# Child task 1 started running
MOCK_LIVEACTION_3_CHILD_1_RUNNING,
# Output produced by the child task
MOCK_LIVEACTION_3_CHILD_1_OUTPUT_1,
MOCK_LIVEACTION_3_CHILD_1_OUTPUT_2,
# Child task 1 finished
MOCK_LIVEACTION_3_CHILD_1_SUCCEEDED,
# Parent workflow task finished
MOCK_LIVEACTION_3_SUCCEDED,
# End another execution
# Child task 1 has finished
MOCK_LIVEACTION_4_CHILD_1_1_SUCCEEDED,
# Child task 1 finished (sub workflow)
MOCK_LIVEACTION_4_CHILD_1_SUCCEEDED,
# Child task 2 started running
MOCK_LIVEACTION_4_CHILD_2_RUNNING,
# Output produced by child task
MOCK_LIVEACTION_4_CHILD_2_OUTPUT_1,
# Child task 2 finished
MOCK_LIVEACTION_4_CHILD_2_TIMED_OUT,
# Parent workflow task finished
MOCK_LIVEACTION_4_SUCCEDED,
]
mock_cls = mock.Mock()
mock_cls.listen = mock.Mock()
mock_listen_generator = mock.Mock()
mock_listen_generator.return_value = MOCK_EVENTS
mock_cls.listen.side_effect = mock_listen_generator
mock_stream_manager.return_value = mock_cls
self.assertEqual(self.shell.run(argv), 0)
self.assertEqual(mock_listen_generator.call_count, 1)
stdout = self.stdout.getvalue()
stderr = self.stderr.getvalue()
expected_result = """
Execution idfoo4 has started.
Child execution (task=task_1) idorquestachild1 has started.
Child execution (task=task_1) idorquestachild1_1 has started.
line orquesta 4
line orquesta 5
Child execution (task=task_1) idorquestachild1_1 has finished (status=succeeded).
Child execution (task=task_1) idorquestachild1 has finished (status=succeeded).
Child execution (task=task_2) idorquestachild2 has started.
line orquesta 100
Child execution (task=task_2) idorquestachild2 has finished (status=timeout).
Execution idfoo4 has completed (status=succeeded).
""".lstrip()
self.assertEqual(stdout, expected_result)
self.assertEqual(stderr, "")
@mock.patch.object(
httpclient.HTTPClient,
"get",
mock.MagicMock(
return_value=base.FakeResponse(
json.dumps(MOCK_LIVEACTION_4_CHILD_2_RUNNING), 200, "OK"
)
),
)
@mock.patch("st2client.client.StreamManager", autospec=True)
def test_tail_child_execution_directly(self, mock_stream_manager):
argv = ["execution", "tail", "idfoo4"]
MOCK_EVENTS = [
# Child task 2 started running
MOCK_LIVEACTION_4_CHILD_2_RUNNING,
# Output produced by child task
MOCK_LIVEACTION_4_CHILD_2_OUTPUT_1,
# Other executions should not interfere
# Child task 1 started running
MOCK_LIVEACTION_3_CHILD_1_RUNNING,
# Child task 1 finished (sub workflow)
MOCK_LIVEACTION_4_CHILD_1_SUCCEEDED,
# Child task 2 finished
MOCK_LIVEACTION_4_CHILD_2_TIMED_OUT,
]
mock_cls = mock.Mock()
mock_cls.listen = mock.Mock()
mock_listen_generator = mock.Mock()
mock_listen_generator.return_value = MOCK_EVENTS
mock_cls.listen.side_effect = mock_listen_generator
mock_stream_manager.return_value = mock_cls
self.assertEqual(self.shell.run(argv), 0)
self.assertEqual(mock_listen_generator.call_count, 1)
stdout = self.stdout.getvalue()
stderr = self.stderr.getvalue()
expected_result = """
Child execution (task=task_2) idorquestachild2 has started.
line orquesta 100
Child execution (task=task_2) idorquestachild2 has finished (status=timeout).
""".lstrip()
self.assertEqual(stdout, expected_result)
self.assertEqual(stderr, "")
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for VMwareAPI.
"""
import urllib2
import mox
from oslo.config import cfg
from nova.compute import api as compute_api
from nova.compute import power_state
from nova.compute import task_states
from nova import context
from nova import db
from nova import exception
from nova import test
import nova.tests.image.fake
from nova.tests import matchers
from nova.tests import utils
from nova.tests.virt.vmwareapi import db_fakes
from nova.tests.virt.vmwareapi import stubs
from nova.virt import fake
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import fake as vmwareapi_fake
from nova.virt.vmwareapi import vim
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
class fake_vm_ref(object):
def __init__(self):
self.value = 4
self._type = 'VirtualMachine'
class fake_http_resp(object):
def __init__(self):
self.code = 200
def read(self):
return "console log"
class VMwareAPIConfTestCase(test.TestCase):
"""Unit tests for VMWare API configurations."""
def setUp(self):
super(VMwareAPIConfTestCase, self).setUp()
def tearDown(self):
super(VMwareAPIConfTestCase, self).tearDown()
def test_configure_without_wsdl_loc_override(self):
# Test the default configuration behavior. By default,
# use the WSDL sitting on the host we are talking to in
# order to bind the SOAP client.
wsdl_loc = cfg.CONF.vmware.wsdl_location
self.assertIsNone(wsdl_loc)
wsdl_url = vim.Vim.get_wsdl_url("https", "www.example.com")
url = vim.Vim.get_soap_url("https", "www.example.com")
self.assertEqual("https://www.example.com/sdk/vimService.wsdl",
wsdl_url)
self.assertEqual("https://www.example.com/sdk", url)
def test_configure_without_wsdl_loc_override_using_ipv6(self):
# Same as above but with an IPv6-based host IP
wsdl_loc = cfg.CONF.vmware.wsdl_location
self.assertIsNone(wsdl_loc)
wsdl_url = vim.Vim.get_wsdl_url("https", "::1")
url = vim.Vim.get_soap_url("https", "::1")
self.assertEqual("https://[::1]/sdk/vimService.wsdl",
wsdl_url)
self.assertEqual("https://[::1]/sdk", url)
def test_configure_with_wsdl_loc_override(self):
# Use the vmware.wsdl_location setting to override the
# default path to the WSDL.
#
# This is useful as a work-around for XML parsing issues
# found when using certain WSDL files with certain XML
# parsers.
#
# The wsdl_url should point to a different host than the one we
# are actually going to send commands to.
fake_wsdl = "https://www.test.com/sdk/foo.wsdl"
self.flags(wsdl_location=fake_wsdl, group='vmware')
wsdl_loc = cfg.CONF.vmware.wsdl_location
self.assertIsNotNone(wsdl_loc)
self.assertEqual(fake_wsdl, wsdl_loc)
wsdl_url = vim.Vim.get_wsdl_url("https", "www.example.com")
url = vim.Vim.get_soap_url("https", "www.example.com")
self.assertEqual(fake_wsdl, wsdl_url)
self.assertEqual("https://www.example.com/sdk", url)
class VMwareAPIVMTestCase(test.TestCase):
"""Unit tests for Vmware API connection calls."""
def setUp(self):
super(VMwareAPIVMTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake', is_admin=False)
self.flags(host_ip='test_url',
host_username='test_username',
host_password='test_pass',
use_linked_clone=False, group='vmware')
self.flags(vnc_enabled=False)
self.user_id = 'fake'
self.project_id = 'fake'
self.node_name = 'test_url'
self.context = context.RequestContext(self.user_id, self.project_id)
vmwareapi_fake.reset()
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.set_stubs(self.stubs)
self.conn = driver.VMwareVCDriver(fake.FakeVirtAPI)
# NOTE(vish): none of the network plugging code is actually
# being tested
self.network_info = utils.get_test_network_info(legacy_model=False)
self.image = {
'id': 'c1c8ce3d-c2e0-4247-890c-ccf5cc1c004c',
'disk_format': 'vhd',
'size': 512,
}
nova.tests.image.fake.stub_out_image_service(self.stubs)
def tearDown(self):
super(VMwareAPIVMTestCase, self).tearDown()
vmwareapi_fake.cleanup()
nova.tests.image.fake.FakeImageService_reset()
def _create_instance_in_the_db(self):
values = {'name': 1,
'id': 1,
'uuid': "fake-uuid",
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': "1",
'kernel_id': "1",
'ramdisk_id': "1",
'mac_address': "de:ad:be:ef:be:ef",
'instance_type': 'm1.large',
'node': self.node_name,
}
self.instance = db.instance_create(None, values)
def _create_vm(self):
"""Create and spawn the VM."""
self._create_instance_in_the_db()
self.type_data = db.flavor_get_by_name(None, 'm1.large')
self.conn.spawn(self.context, self.instance, self.image,
injected_files=[], admin_password=None,
network_info=self.network_info,
block_device_info=None)
self._check_vm_record()
def _check_vm_record(self):
"""
Check if the spawned VM's properties correspond to the instance in
the db.
"""
instances = self.conn.list_instances()
self.assertEquals(len(instances), 1)
# Get Nova record for VM
vm_info = self.conn.get_info({'uuid': 'fake-uuid',
'name': 1})
# Get record for VM
vms = vmwareapi_fake._get_objects("VirtualMachine")
vm = vms.objects[0]
# Check that m1.large above turned into the right thing.
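# memory_mb is expressed in MiB; shifting left by 10 multiplies by 1024,
# giving the KiB value that get_info reports for max_mem/mem below.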
mem_kib = long(self.type_data['memory_mb']) << 10
vcpus = self.type_data['vcpus']
self.assertEquals(vm_info['max_mem'], mem_kib)
self.assertEquals(vm_info['mem'], mem_kib)
self.assertEquals(vm.get("summary.config.numCpu"), vcpus)
self.assertEquals(vm.get("summary.config.memorySizeMB"),
self.type_data['memory_mb'])
self.assertEqual(
vm.get("config.hardware.device")[2].device.obj_name,
"ns0:VirtualE1000")
# Check that the VM is running according to Nova
self.assertEquals(vm_info['state'], power_state.RUNNING)
# Check that the VM is running according to vSphere API.
self.assertEquals(vm.get("runtime.powerState"), 'poweredOn')
found_vm_uuid = False
found_iface_id = False
for c in vm.get("config.extraConfig"):
if (c.key == "nvp.vm-uuid" and c.value == self.instance['uuid']):
found_vm_uuid = True
if (c.key == "nvp.iface-id.0" and c.value == "vif-xxx-yyy-zzz"):
found_iface_id = True
self.assertTrue(found_vm_uuid)
self.assertTrue(found_iface_id)
def _check_vm_info(self, info, pwr_state=power_state.RUNNING):
"""
Check if the get_info returned values correspond to the instance
object in the db.
"""
mem_kib = long(self.type_data['memory_mb']) << 10
self.assertEquals(info["state"], pwr_state)
self.assertEquals(info["max_mem"], mem_kib)
self.assertEquals(info["mem"], mem_kib)
self.assertEquals(info["num_cpu"], self.type_data['vcpus'])
def test_list_instances(self):
instances = self.conn.list_instances()
self.assertEquals(len(instances), 0)
def test_list_instances_1(self):
self._create_vm()
instances = self.conn.list_instances()
self.assertEquals(len(instances), 1)
def test_spawn(self):
self._create_vm()
info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
def test_snapshot(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self._create_vm()
info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
self.conn.snapshot(self.context, self.instance, "Test-Snapshot",
func_call_matcher.call)
info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
self.assertIsNone(func_call_matcher.match())
def test_snapshot_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(exception.InstanceNotFound, self.conn.snapshot,
self.context, self.instance, "Test-Snapshot",
lambda *args, **kwargs: None)
def test_reboot(self):
self._create_vm()
info = self.conn.get_info({'name': 1, 'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
reboot_type = "SOFT"
self.conn.reboot(self.context, self.instance, self.network_info,
reboot_type)
info = self.conn.get_info({'name': 1, 'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
def test_reboot_with_uuid(self):
"""Test fall back to use name when can't find by uuid."""
self._create_vm()
info = self.conn.get_info({'name': 'fake-uuid', 'uuid': 'wrong-uuid'})
self._check_vm_info(info, power_state.RUNNING)
reboot_type = "SOFT"
self.conn.reboot(self.context, self.instance, self.network_info,
reboot_type)
info = self.conn.get_info({'name': 'fake-uuid', 'uuid': 'wrong-uuid'})
self._check_vm_info(info, power_state.RUNNING)
def test_reboot_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(exception.InstanceNotFound, self.conn.reboot,
self.context, self.instance, self.network_info,
'SOFT')
def test_poll_rebooting_instances(self):
self.mox.StubOutWithMock(compute_api.API, 'reboot')
compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg())
self.mox.ReplayAll()
self._create_vm()
instances = [self.instance]
self.conn.poll_rebooting_instances(60, instances)
def test_reboot_not_poweredon(self):
self._create_vm()
info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.instance)
info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.SUSPENDED)
self.assertRaises(exception.InstanceRebootFailure, self.conn.reboot,
self.context, self.instance, self.network_info,
'SOFT')
def test_suspend(self):
self._create_vm()
info = self.conn.get_info({'uuid': "fake-uuid"})
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.instance)
info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.SUSPENDED)
def test_suspend_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(exception.InstanceNotFound, self.conn.suspend,
self.instance)
def test_resume(self):
self._create_vm()
info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.instance)
info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.SUSPENDED)
self.conn.resume(self.instance, self.network_info)
info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
def test_resume_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(exception.InstanceNotFound, self.conn.resume,
self.instance, self.network_info)
def test_resume_not_suspended(self):
self._create_vm()
info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
self.assertRaises(exception.InstanceResumeFailure, self.conn.resume,
self.instance, self.network_info)
def test_power_on(self):
self._create_vm()
info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
self.conn.power_off(self.instance)
info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.SHUTDOWN)
self.conn.power_on(self.context, self.instance, self.network_info)
info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
def test_power_on_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(exception.InstanceNotFound, self.conn.power_on,
self.context, self.instance, self.network_info)
def test_power_off(self):
self._create_vm()
info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
self.conn.power_off(self.instance)
info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.SHUTDOWN)
def test_power_off_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(exception.InstanceNotFound, self.conn.power_off,
self.instance)
def test_power_off_suspended(self):
self._create_vm()
self.conn.suspend(self.instance)
info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.SUSPENDED)
self.assertRaises(exception.InstancePowerOffFailure,
self.conn.power_off, self.instance)
def test_resume_state_on_host_boot(self):
self._create_vm()
self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name')
self.mox.StubOutWithMock(self.conn, "reboot")
vm_util.get_vm_state_from_name(mox.IgnoreArg(),
self.instance['uuid']).AndReturn("poweredOff")
self.conn.reboot(self.context, self.instance, 'network_info',
'hard', None)
self.mox.ReplayAll()
self.conn.resume_state_on_host_boot(self.context, self.instance,
'network_info')
def test_resume_state_on_host_boot_no_reboot_1(self):
"""Don't call reboot on instance which is poweredon."""
self._create_vm()
self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name')
self.mox.StubOutWithMock(self.conn, 'reboot')
vm_util.get_vm_state_from_name(mox.IgnoreArg(),
self.instance['uuid']).AndReturn("poweredOn")
self.mox.ReplayAll()
self.conn.resume_state_on_host_boot(self.context, self.instance,
'network_info')
def test_resume_state_on_host_boot_no_reboot_2(self):
"""Don't call reboot on instance which is suspended."""
self._create_vm()
self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name')
self.mox.StubOutWithMock(self.conn, 'reboot')
vm_util.get_vm_state_from_name(mox.IgnoreArg(),
self.instance['uuid']).AndReturn("suspended")
self.mox.ReplayAll()
self.conn.resume_state_on_host_boot(self.context, self.instance,
'network_info')
def test_get_info(self):
self._create_vm()
info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
def test_destroy(self):
self._create_vm()
info = self.conn.get_info({'uuid': 'fake-uuid'})
self._check_vm_info(info, power_state.RUNNING)
instances = self.conn.list_instances()
self.assertEquals(len(instances), 1)
self.conn.destroy(self.instance, self.network_info)
instances = self.conn.list_instances()
self.assertEquals(len(instances), 0)
def test_destroy_non_existent(self):
self._create_instance_in_the_db()
self.assertEquals(self.conn.destroy(self.instance, self.network_info),
None)
def test_pause(self):
pass
def test_unpause(self):
pass
def test_diagnostics(self):
pass
def test_get_console_output(self):
vm_ref = fake_vm_ref()
result = fake_http_resp()
self._create_instance_in_the_db()
self.mox.StubOutWithMock(vm_util, 'get_vm_ref_from_name')
self.mox.StubOutWithMock(urllib2, 'urlopen')
vm_util.get_vm_ref_from_name(mox.IgnoreArg(), self.instance['name']).\
AndReturn(vm_ref)
urllib2.urlopen(mox.IgnoreArg()).AndReturn(result)
self.mox.ReplayAll()
self.conn.get_console_output(self.instance)
def _test_finish_migration(self, power_on):
"""
Tests the finish_migration method on vmops via the
VMwareVCDriver. Results are checked against whether or not
the underlying instance should have been powered on.
"""
self.power_on_called = False
def fake_power_on(instance):
self.assertEquals(self.instance, instance)
self.power_on_called = True
def fake_vmops_update_instance_progress(context, instance, step,
total_steps):
self.assertEquals(self.context, context)
self.assertEquals(self.instance, instance)
self.assertEquals(4, step)
self.assertEqual(vmops.RESIZE_TOTAL_STEPS, total_steps)
self.stubs.Set(self.conn._vmops, "_power_on", fake_power_on)
self.stubs.Set(self.conn._vmops, "_update_instance_progress",
fake_vmops_update_instance_progress)
# setup the test instance in the database
self._create_vm()
# perform the migration on our stubbed methods
self.conn.finish_migration(context=self.context,
migration=None,
instance=self.instance,
disk_info=None,
network_info=None,
block_device_info=None,
image_meta=None,
power_on=power_on)
# verify the results
self.assertEquals(power_on, self.power_on_called)
def test_finish_migration_power_on(self):
self._test_finish_migration(power_on=True)
def test_finish_migration_power_off(self):
self._test_finish_migration(power_on=False)
def _test_finish_revert_migration(self, power_on):
"""
Tests the finish_revert_migration method on vmops via the
VMwareVCDriver. Results are checked against whether or not
the underlying instance should have been powered on.
"""
# setup the test instance in the database
self._create_vm()
self.power_on_called = False
self.vm_name = str(self.instance['name']) + '-orig'
def fake_power_on(instance):
self.assertEquals(self.instance, instance)
self.power_on_called = True
def fake_get_orig_vm_name_label(instance):
self.assertEquals(self.instance, instance)
return self.vm_name
def fake_get_vm_ref_from_name(session, vm_name):
self.assertEquals(self.vm_name, vm_name)
return vmwareapi_fake._get_objects("VirtualMachine").objects[0]
def fake_get_vm_ref_from_uuid(session, vm_uuid):
return vmwareapi_fake._get_objects("VirtualMachine").objects[0]
def fake_call_method(*args, **kwargs):
pass
def fake_wait_for_task(*args, **kwargs):
pass
self.stubs.Set(self.conn._vmops, "_power_on", fake_power_on)
self.stubs.Set(self.conn._vmops, "_get_orig_vm_name_label",
fake_get_orig_vm_name_label)
self.stubs.Set(vm_util, "get_vm_ref_from_uuid",
fake_get_vm_ref_from_uuid)
self.stubs.Set(vm_util, "get_vm_ref_from_name",
fake_get_vm_ref_from_name)
self.stubs.Set(self.conn._session, "_call_method", fake_call_method)
self.stubs.Set(self.conn._session, "_wait_for_task",
fake_wait_for_task)
# perform the revert on our stubbed methods
self.conn.finish_revert_migration(instance=self.instance,
network_info=None,
power_on=power_on)
# verify the results
self.assertEquals(power_on, self.power_on_called)
def test_finish_revert_migration_power_on(self):
self._test_finish_revert_migration(power_on=True)
def test_finish_revert_migration_power_off(self):
self._test_finish_revert_migration(power_on=False)
def test_diagnostics_non_existent_vm(self):
self._create_instance_in_the_db()
self.assertRaises(exception.InstanceNotFound,
self.conn.get_diagnostics,
self.instance)
def test_get_console_pool_info(self):
info = self.conn.get_console_pool_info("console_type")
self.assertEquals(info['address'], 'test_url')
self.assertEquals(info['username'], 'test_username')
self.assertEquals(info['password'], 'test_pass')
def test_get_vnc_console_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(exception.InstanceNotFound,
self.conn.get_vnc_console,
self.instance)
def test_get_vnc_console(self):
self._create_instance_in_the_db()
self._create_vm()
vnc_dict = self.conn.get_vnc_console(self.instance)
self.assertEquals(vnc_dict['host'], "ha-host")
self.assertEquals(vnc_dict['port'], 5910)
def test_host_ip_addr(self):
self.assertEquals(self.conn.get_host_ip_addr(), "test_url")
def test_get_volume_connector(self):
self._create_instance_in_the_db()
connector_dict = self.conn.get_volume_connector(self.instance)
self.assertEquals(connector_dict['ip'], "test_url")
self.assertEquals(connector_dict['initiator'], "iscsi-name")
self.assertEquals(connector_dict['host'], "test_url")
class VMwareAPIHostTestCase(test.TestCase):
"""Unit tests for Vmware API host calls."""
def setUp(self):
super(VMwareAPIHostTestCase, self).setUp()
self.flags(host_ip='test_url',
host_username='test_username',
host_password='test_pass', group='vmware')
vmwareapi_fake.reset()
stubs.set_stubs(self.stubs)
self.conn = driver.VMwareESXDriver(False)
def tearDown(self):
super(VMwareAPIHostTestCase, self).tearDown()
vmwareapi_fake.cleanup()
def test_host_state(self):
stats = self.conn.get_host_stats()
self.assertEquals(stats['vcpus'], 16)
self.assertEquals(stats['disk_total'], 1024)
self.assertEquals(stats['disk_available'], 500)
self.assertEquals(stats['disk_used'], 1024 - 500)
self.assertEquals(stats['host_memory_total'], 1024)
self.assertEquals(stats['host_memory_free'], 1024 - 500)
supported_instances = [('i686', 'vmware', 'hvm'),
('x86_64', 'vmware', 'hvm')]
self.assertEquals(stats['supported_instances'], supported_instances)
def _test_host_action(self, method, action, expected=None):
result = method('host', action)
self.assertEqual(result, expected)
def test_host_reboot(self):
self._test_host_action(self.conn.host_power_action, 'reboot')
def test_host_shutdown(self):
self._test_host_action(self.conn.host_power_action, 'shutdown')
def test_host_startup(self):
self._test_host_action(self.conn.host_power_action, 'startup')
def test_host_maintenance_on(self):
self._test_host_action(self.conn.host_maintenance_mode, True)
def test_host_maintenance_off(self):
self._test_host_action(self.conn.host_maintenance_mode, False)
class VMwareAPIVCDriverTestCase(VMwareAPIVMTestCase):
def setUp(self):
super(VMwareAPIVCDriverTestCase, self).setUp()
self.flags(cluster_name='test_cluster',
task_poll_interval=10, datastore_regex='.*', group='vmware')
self.flags(vnc_enabled=False)
self.conn = driver.VMwareVCDriver(None, False)
def tearDown(self):
super(VMwareAPIVCDriverTestCase, self).tearDown()
vmwareapi_fake.cleanup()
def test_get_available_resource(self):
stats = self.conn.get_available_resource(self.node_name)
self.assertEquals(stats['vcpus'], 16)
self.assertEquals(stats['local_gb'], 1024)
self.assertEquals(stats['local_gb_used'], 1024 - 500)
self.assertEquals(stats['memory_mb'], 1024)
self.assertEquals(stats['memory_mb_used'], 1024 - 524)
self.assertEquals(stats['hypervisor_type'], 'VMware ESXi')
self.assertEquals(stats['hypervisor_version'], '5.0.0')
self.assertEquals(stats['hypervisor_hostname'], 'test_url')
def test_invalid_datastore_regex(self):
# Tests that an invalid regular expression in the vmware.datastore_regex
# option raises an exception.
self.flags(cluster_name='test_cluster', datastore_regex='fake-ds(01',
group='vmware')
self.assertRaises(exception.InvalidInput, driver.VMwareVCDriver, None)
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for trackable object SavedModel loading."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import os
import sys
import tempfile
import weakref
from absl.testing import parameterized
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.eager import wrap_function
from tensorflow.python.feature_column import feature_column_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function as framework_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import input_layer
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training as training_lib
from tensorflow.python.keras.layers import convolutional
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.lib.io import file_io
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import cond_v2
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import save
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import monitored_session
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util
from tensorflow.python.util import tf_inspect
def cycle(obj, cycles, signatures=None):
to_save = obj
# TODO(vbardiovsky): It would be nice if exported protos reached a fixed
# point w.r.t. saving/restoring, ideally after 2nd saving.
for _ in range(cycles):
path = tempfile.mkdtemp(prefix=test.get_temp_dir())
# If available, we'll run the save and restore preferring the GPU. This
# just makes sure we aren't throwing errors and have enough
# device("CPU") blocks to satisfy the placer.
with test_util.use_gpu():
save.save(to_save, path, signatures)
loaded = load.load(path)
to_save = loaded
return loaded
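# Illustrative usage sketch (not part of the test suite): cycle() is the
# save/reload round-trip helper used by every test below, e.g.
#
#   root = tracking.AutoTrackable()
#   root.v = variables.Variable(1.)
#   reloaded = cycle(root, 2)  # saved and reloaded twice
#   assert reloaded.v.numpy() == 1.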
@parameterized.named_parameters(
dict(testcase_name="ReloadOnce", cycles=1),
dict(testcase_name="ReloadTwice", cycles=2),
dict(testcase_name="ReloadThrice", cycles=3))
class LoadTest(test.TestCase, parameterized.TestCase):
def test_structure_import(self, cycles):
root = tracking.AutoTrackable()
root.dep_one = tracking.AutoTrackable()
root.dep_two = tracking.AutoTrackable()
root.dep_two.dep = tracking.AutoTrackable()
root.dep_three = root.dep_two.dep
imported = cycle(root, cycles)
self.assertIs(imported.dep_three, imported.dep_two.dep)
self.assertIsNot(imported.dep_one, imported.dep_two)
def test_variables(self, cycles):
root = tracking.AutoTrackable()
root.v1 = variables.Variable(1., trainable=True)
root.v2 = variables.Variable(2., trainable=False)
imported = cycle(root, cycles)
self.assertEqual(imported.v1.numpy(), 1.0)
self.assertTrue(imported.v1.trainable)
self.assertEqual(imported.v2.numpy(), 2.0)
self.assertFalse(imported.v2.trainable)
def test_variables_name(self, cycles):
root = tracking.AutoTrackable()
# Test 2 variables with same name: should work as the checkpoint
# is based on object name and not on variable name.
root.v1 = variables.Variable(1., trainable=True, name="v1")
root.v2 = variables.Variable(2., trainable=False, name="v1")
imported = cycle(root, cycles)
self.assertEqual(imported.v1.numpy(), 1.0)
self.assertEqual(imported.v2.numpy(), 2.0)
self.assertEqual(imported.v1.name, root.v1.name)
self.assertEqual(imported.v2.name, root.v2.name)
with variable_scope.variable_scope("foo"):
imported = cycle(root, cycles)
self.assertTrue(imported.v1.name.startswith("foo/"))
self.assertTrue(imported.v2.name.startswith("foo/"))
def test_partially_defined_variable_shape(self, cycles):
class MakeVariable(module.Module):
def __init__(self):
self.v = None
@def_function.function(
input_signature=[tensor_spec.TensorSpec([None], dtypes.int64)])
def make_variable(self, initial_value):
if self.v is None:
self.v = variables.Variable(initial_value)
m = MakeVariable()
m.make_variable([1, 2, 3])
m = cycle(m, cycles)
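# The variable's initial_value came from an input signature with shape [None],
# so its static shape is only partially defined; after the save/load cycle it
# should therefore still accept a value of a different length, as the assign
# and shape check below verify.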
m.v.assign([1, 2, 3, 4])
self.assertEqual([None], tensor_shape.as_shape(m.v.shape).as_list())
@test_util.run_in_graph_and_eager_modes
def test_capture_variables(self, cycles):
root = tracking.AutoTrackable()
root.weights = variables.Variable(2.)
self.evaluate(root.weights.initializer)
root.f = def_function.function(
lambda x: root.weights * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
for _ in range(cycles):
imported = cycle(root, 1)
self.evaluate(imported.weights.initializer)
self.assertEqual(4., self.evaluate(imported.f(constant_op.constant(2.))))
self.evaluate(imported.weights.assign(4.0))
self.assertEqual(8., self.evaluate(imported.f(constant_op.constant(2.))))
@test_util.run_in_graph_and_eager_modes
def test_capture_constant(self, cycles):
root = tracking.AutoTrackable()
captured_constant = constant_op.constant(2.)
root.f = def_function.function(
lambda x: captured_constant * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
imported = cycle(root, cycles)
self.assertEqual(4., self.evaluate(imported.f(constant_op.constant(2.))))
def test_control_outputs(self, cycles):
exported = tracking.AutoTrackable()
exported.v = variables.Variable(1.)
exported.f = def_function.function(
lambda: exported.v.assign(2., name="should_be_control_output"))
exported_graph = exported.f.get_concrete_function().graph
self.assertIn(
exported_graph.get_operation_by_name("should_be_control_output"),
exported_graph.control_outputs)
imported = cycle(exported, cycles)
# Calling get_concrete_function wraps in a second call operation; we want to
# inspect the original function body for the control output; digging into
# graph.as_graph_def() and its FunctionDefLibrary is another option.
imported_concrete, = imported.f.concrete_functions
imported_graph = imported_concrete.graph
self.assertIn(
imported_graph.get_operation_by_name("should_be_control_output"),
imported_graph.control_outputs)
def _make_asset(self, contents):
filename = tempfile.mktemp(prefix=self.get_temp_dir())
with open(filename, "w") as f:
f.write(contents)
return filename
@test_util.run_in_graph_and_eager_modes
def test_assets(self, cycles):
file1 = self._make_asset("contents 1")
file2 = self._make_asset("contents 2")
root = tracking.AutoTrackable()
root.asset1 = tracking.Asset(file1)
root.asset2 = tracking.Asset(file2)
save_dir = os.path.join(self.get_temp_dir(), "save_dir")
save.save(root, save_dir)
file_io.delete_file(file1)
file_io.delete_file(file2)
load_dir = os.path.join(self.get_temp_dir(), "load_dir")
file_io.rename(save_dir, load_dir)
imported = load.load(load_dir)
with open(self.evaluate(imported.asset1.asset_path), "r") as f:
self.assertEqual("contents 1", f.read())
with open(self.evaluate(imported.asset2.asset_path), "r") as f:
self.assertEqual("contents 2", f.read())
def test_cond_prune(self, cycles):
x_in = []
x_out = []
def f(x, y):
x_in.append(x)
xx = cond_v2.cond_v2(
math_ops.less(1, 2),
lambda: x + 1,
lambda: x + 2,
)
x_out.append(xx)
return xx, 2 * y
f_wrapped = wrap_function.wrap_function(
f, [tensor_spec.TensorSpec((), dtypes.float32)] * 2)
f_pruned = f_wrapped.prune(x_in[0], [x_out[0]])
class Adder(module.Module):
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32)])
def add(self, x):
return f_pruned(x)
root = Adder()
root.add(constant_op.constant(1.))
root = cycle(root, cycles)
root.add(constant_op.constant(1.))
def test_capture_assets(self, cycles):
root = tracking.AutoTrackable()
root.vocab = tracking.Asset(self._make_asset("contents"))
root.f = def_function.function(
lambda: root.vocab.asset_path,
input_signature=[])
imported = cycle(root, cycles)
original_output = root.f().numpy()
imported_output = imported.f().numpy()
self.assertNotEqual(original_output, imported_output)
with open(imported_output, "r") as f:
self.assertEqual("contents", f.read())
def test_capture_assets_in_graph(self, cycles):
root = tracking.AutoTrackable()
root.vocab = tracking.Asset(self._make_asset("contents"))
root.f = def_function.function(
lambda: root.vocab.asset_path,
input_signature=[])
original_output = root.f().numpy()
if cycles > 1:
root = cycle(root, cycles - 1)
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(root, path)
with ops.Graph().as_default():
imported = load.load(path)
imported_tensor = imported.f()
with monitored_session.MonitoredSession() as sess:
imported_output = sess.run(imported_tensor)
self.assertNotEqual(original_output, imported_output)
with open(imported_output, "r") as f:
self.assertEqual("contents", f.read())
def test_dedup_assets(self, cycles):
vocab = self._make_asset("contents")
root = tracking.AutoTrackable()
root.asset1 = tracking.Asset(vocab)
root.asset2 = tracking.Asset(vocab)
imported = cycle(root, cycles)
self.assertEqual(imported.asset1.asset_path.numpy(),
imported.asset2.asset_path.numpy())
def test_implicit_input_signature(self, cycles):
@def_function.function
def func(x):
return 2 * x
root = tracking.AutoTrackable()
root.f = func
# Add two traces.
root.f(constant_op.constant(1.))
root.f(constant_op.constant(1))
imported = cycle(root, cycles)
self.assertEqual(4., imported.f(constant_op.constant(2.)).numpy())
self.assertEqual(14, imported.f(constant_op.constant(7)).numpy())
def test_explicit_input_signature(self, cycles):
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def func(x):
return 2 * x
root = tracking.AutoTrackable()
root.f = func
imported = cycle(root, cycles)
self.assertEqual(4., imported.f(constant_op.constant(2.0)).numpy())
def test_explicit_save_signature(self, cycles):
@def_function.function
def func(x):
return 2 * x
root = tracking.AutoTrackable()
root.f = func
imported = cycle(
root, cycles, {
"f":
root.f.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32))
})
self.assertEqual(4., imported.f(constant_op.constant(2.0)).numpy())
def test_nested_functions(self, cycles):
f = def_function.function(
lambda x: x*2.0,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
g = def_function.function(
lambda x: f(x) + 1.0,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
root = tracking.AutoTrackable()
root.g = g
imported = cycle(root, cycles)
imported.g(constant_op.constant([1.0]))
def test_function_with_default_bool_input(self, cycles):
def func(x, training=False):
if training:
return 2 * x
else:
return 7
root = tracking.AutoTrackable()
root.f = def_function.function(func)
self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())
self.assertEqual(7, root.f(constant_op.constant(1)).numpy())
self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())
imported = cycle(root, cycles)
self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())
self.assertEqual(7, imported.f(constant_op.constant(2)).numpy())
def test_function_with_default_none_input(self, cycles):
def func(x, dtype=None):
if dtype:
return array_ops.zeros(shape=x.shape, dtype=dtype)
else:
return array_ops.zeros(shape=x.shape, dtype=dtypes.float32)
root = tracking.AutoTrackable()
root.f = def_function.function(func)
self.assertAllEqual([0.0, 0.0, 0.0],
root.f(constant_op.constant([1, 2, 3])).numpy())
self.assertAllEqual([0.0, 0.0, 0.0],
root.f(constant_op.constant([1.0, 2.0, 3.0])).numpy())
self.assertAllEqual([0.0, 0.0, 0.0, 0.0],
root.f(constant_op.constant([1, 2, 3, 4])).numpy())
self.assertAllEqual([0, 0, 0],
root.f(
constant_op.constant([1.0, 2.0, 3.0]),
dtype=dtypes.int32).numpy())
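# Each of the four calls above used a distinct combination of input dtype,
# shape, and the dtype keyword, so four concrete functions are expected to
# have been traced for serialization.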
concrete_functions = root.f._list_all_concrete_functions_for_serialization() # pylint: disable=protected-access
self.assertEqual(4, len(concrete_functions))
imported = cycle(root, cycles)
self.assertAllEqual([0.0, 0.0, 0.0],
imported.f(constant_op.constant([1, 2, 3]),
None).numpy())
self.assertAllEqual([0.0, 0.0, 0.0],
imported.f(constant_op.constant([1.0, 2.0,
3.0])).numpy())
self.assertAllEqual([0.0, 0.0, 0.0, 0.0],
imported.f(constant_op.constant([1, 2, 3, 4])).numpy())
self.assertAllEqual([0, 0, 0],
imported.f(
constant_op.constant([1.0, 2.0, 3.0]),
dtype=dtypes.int32).numpy())
def test_function_no_return(self, cycles):
class TrackableWithOneVariable(tracking.AutoTrackable):
def __init__(self, initial_value=0.0):
super(TrackableWithOneVariable, self).__init__()
self.variable = variables.Variable(initial_value)
@def_function.function
def increase(self, by=1.0):
self.variable.assign_add(by)
obj = TrackableWithOneVariable(5.0)
obj.increase(constant_op.constant(10.0))
self.assertEqual(15.0, obj.variable.numpy())
obj.increase()
self.assertEqual(16.0, obj.variable.numpy())
imported = cycle(obj, cycles)
imported.increase(constant_op.constant(10.0))
self.assertEqual(26.0, imported.variable.numpy())
imported.increase(constant_op.constant(1.0))
self.assertEqual(27.0, imported.variable.numpy())
def test_structured_inputs(self, cycles):
def func(x, training=True):
# x is a nested structure, we care about one particular tensor.
_, (a, b) = x
if training:
return 2 * a["a"] + b
else:
return 7
root = tracking.AutoTrackable()
root.f = def_function.function(func)
x = constant_op.constant(10)
y = constant_op.constant(11)
input1 = [6, ({"a": x}, y)]
input2 = [7, ({"a": x}, y)] # Not compatible with input1 signature.
input3 = [6, ({"a": y}, x)] # Compatible with input1 signature.
# Note: by only calling f(input1) before serialization, only inputs with
# matching signature will be valid on the loaded model.
self.assertEqual(31, root.f(input1).numpy())
imported = cycle(root, cycles)
with self.assertRaisesRegexp(ValueError,
"Could not find matching function to call"):
imported.f(input2)
self.assertEqual(31, imported.f(input1).numpy())
self.assertEqual(32, imported.f(input3).numpy())
def test_structured_output(self, cycles):
# Use fields with non-alphabetical order
named_tuple_type = collections.namedtuple("NamedTupleHello", ["b", "a"])
def func(input1, input2):
named_tuple = named_tuple_type(a=input1 + input2, b=input1 * input2)
return [named_tuple, input2, {"x": 0.5}]
root = tracking.AutoTrackable()
root.f = def_function.function(func)
result = root.f(constant_op.constant(2), constant_op.constant(3))
self.assertEqual(5, result[0].a.numpy())
self.assertEqual(6, result[0].b.numpy())
self.assertEqual(["b", "a"], list(result[0]._asdict().keys()))
self.assertEqual(3, result[1].numpy())
self.assertEqual(0.5, result[2]["x"].numpy())
imported = cycle(root, cycles)
result = imported.f(constant_op.constant(2), constant_op.constant(5))
self.assertEqual(7, result[0].a.numpy())
self.assertEqual(10, result[0].b.numpy())
self.assertEqual(["b", "a"], list(result[0]._asdict().keys()))
self.assertEqual(5, result[1].numpy())
self.assertEqual(0.5, result[2]["x"].numpy())
def test_optimizer(self, cycles):
class _HasOptimizer(module.Module):
def __init__(self):
super(_HasOptimizer, self).__init__()
self.layer = core.Dense(1)
self.optimizer = adam.Adam(0.01)
@def_function.function
def __call__(self, x):
return self.layer(x)
@def_function.function
def train(self, x, y):
with backprop.GradientTape() as tape:
predicted = self(x)
loss = math_ops.reduce_sum(math_ops.abs(y - predicted))
train_vars = self.layer.trainable_variables
grads = tape.gradient(loss, train_vars)
self.optimizer.apply_gradients(zip(grads, train_vars))
root = _HasOptimizer()
train_input = dict(x=constant_op.constant([[1.]]),
y=constant_op.constant([[2.]]))
root.train(**train_input)
imported = cycle(root, cycles)
self.assertAllClose(root.optimizer.learning_rate.numpy(),
imported.optimizer.learning_rate.numpy())
self.assertAllClose(root(constant_op.constant([[-0.5]])),
imported(constant_op.constant([[-0.5]])))
root.train(**train_input)
imported.train(**train_input)
self.assertAllClose(root(constant_op.constant([[-0.5]])),
imported(constant_op.constant([[-0.5]])))
def test_positional_arguments(self, cycles):
def func(x, training=False, abc=7.1, defg=7.7):
del abc
if training:
return 2 * x
if defg == 7:
return 6
else:
return 7
root = tracking.AutoTrackable()
root.f = def_function.function(func)
self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())
self.assertEqual(7, root.f(constant_op.constant(1)).numpy())
self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())
self.assertEqual(6, root.f(constant_op.constant(1), defg=7.0).numpy())
imported = cycle(root, cycles)
self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())
self.assertEqual(7, imported.f(constant_op.constant(2)).numpy())
self.assertEqual(6, imported.f(constant_op.constant(1), defg=7.0).numpy())
def test_additional_kwargs(self, cycles):
def func(x, training=False, **options):
del options
if training:
return 2 * x
else:
return 7
root = tracking.AutoTrackable()
root.f = def_function.function(func)
x = constant_op.constant(10)
self.assertEqual(7, root.f(x, learning_rate=0.5, epochs=3).numpy())
imported = cycle(root, cycles)
with self.assertRaisesRegexp(ValueError,
"Could not find matching function to call.*"):
imported.f(x, learning_rate=0.5, epochs=4)
self.assertEqual(7, imported.f(x, learning_rate=0.5, epochs=3).numpy())
def test_member_function(self, cycles):
class TrackableWithMember(tracking.AutoTrackable):
def __init__(self):
super(TrackableWithMember, self).__init__()
self._some_value = 20
@def_function.function
def f(self, x, training=False):
if training:
return 2 * x
else:
return 7 + self._some_value
root = TrackableWithMember()
self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())
self.assertEqual(27, root.f(constant_op.constant(1)).numpy())
self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())
imported = cycle(root, cycles)
self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())
self.assertEqual(27, imported.f(constant_op.constant(2)).numpy())
def test_side_effect_listing(self, cycles):
class M(tracking.AutoTrackable):
def __init__(self):
super(M, self).__init__()
self.var = None
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def f(self, x):
if self.var is None:
self.var = variables.Variable(2.)
return x * self.var
m = M()
cycle(m, cycles)
self.assertEqual(4.0, m.f(constant_op.constant(2.0)).numpy())
def test_basic_backprop(self, cycles):
weight = variables.Variable(1., trainable=True)
bias = variables.Variable(0., trainable=True)
g = def_function.function(
lambda x: x*weight + bias,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
root = tracking.AutoTrackable()
root.weight = weight
root.bias = bias
root.g = g
imported = cycle(root, cycles)
with backprop.GradientTape() as t:
x = constant_op.constant([3.5])
loss = imported.g(x)
grad = t.gradient(loss, [imported.weight, imported.bias])
self.assertAllClose(grad, [3.5, 1.0])
def test_nested_backprop(self, cycles):
weight = variables.Variable(1., trainable=True)
bias = variables.Variable(0., trainable=True)
# Note: this function gets called from other function defs via a
# "PartitionedCall" op node.
@def_function.function(input_signature=[
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32)])
def mul(x, y):
return x * y
# Note: this function gets called from other function defs via a
# "StatefulPartitionedCall" op node.
@def_function.function(input_signature=[
tensor_spec.TensorSpec(None, dtypes.float32)])
def f(x):
return mul(weight.read_value(), x)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(None, dtypes.float32)])
def g(x):
return f(x) + bias
@def_function.function(input_signature=[
tensor_spec.TensorSpec(None, dtypes.float32)])
def h(x):
return g(x) + bias
root = tracking.AutoTrackable()
root.weight = weight
root.bias = bias
root.g = h
imported = cycle(root, cycles)
with backprop.GradientTape() as t:
x = constant_op.constant([3.5])
loss = imported.g(x)
grad = t.gradient(loss, [imported.weight, imported.bias])
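# loss == weight * x + 2 * bias (bias is added in both g and h), so with
# x = [3.5] the gradients w.r.t. [weight, bias] are [3.5, 2.0].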
self.assertAllClose(grad, [3.5, 2.0])
def test_while_loop_backprop(self, cycles):
weight = variables.Variable(2., trainable=True)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(dtype=dtypes.float32, shape=(None, None))])
def g(x):
"""Adds rows of matrix x after multiplying each entry by v."""
i_0 = constant_op.constant(0)
s_0 = constant_op.constant([0., 0.])
cond = lambda i, _: i < array_ops.shape(x)[1]
body = lambda i, s: (i + 1, s + weight * x[:, i])
i_end, s_end = control_flow_ops.while_loop(cond, body, (i_0, s_0))
del i_end
return s_end
root = tracking.AutoTrackable()
root.weight = weight
root.g = g
imported = cycle(root, cycles)
def get_gradient(obj):
with backprop.GradientTape() as t:
x = constant_op.constant([[1., 2., 3.], [1., -2, 3.]])
y = obj.g(x)
self.assertAllClose(y, obj.weight * [6., 2.])
loss = math_ops.reduce_sum(y) # weight * 8.
self.assertAllEqual(t.watched_variables(), [obj.weight])
return t.gradient(loss, obj.weight)
imported_gradient = get_gradient(imported)
original_gradient = get_gradient(root)
self.assertIsNotNone(original_gradient)
self.assertAllClose(original_gradient, 8.)
self.assertIsNotNone(imported_gradient)
self.assertAllClose(imported_gradient, 8.)
def _test_restored_func_with_captured_var_backprop(self, cycles, dtype):
weight = variables.Variable(2., trainable=True, dtype=dtype)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(dtype=dtype, shape=())])
def g(x):
return x * weight
root = tracking.AutoTrackable()
root.weight = weight
root.g = g
imported = cycle(root, cycles)
def get_gradient(obj):
with backprop.GradientTape() as t:
x = constant_op.constant(2.)
y = obj.g(x)
self.assertAllClose(y, obj.weight * 2.)
self.assertAllEqual(t.watched_variables(), [obj.weight])
return t.gradient(y, obj.weight)
imported_gradient = get_gradient(imported)
original_gradient = get_gradient(root)
self.assertIsNotNone(original_gradient)
self.assertAllClose(original_gradient, 2.)
self.assertIsNotNone(imported_gradient)
self.assertAllClose(imported_gradient, 2.)
def test_restored_func_with_captured_var_backprop_float32(self, cycles):
self._test_restored_func_with_captured_var_backprop(cycles, dtypes.float32)
def test_restored_func_with_captured_var_backprop_float64(self, cycles):
self.skipTest("b/144573917")
self._test_restored_func_with_captured_var_backprop(cycles, dtypes.float64)
def test_callable(self, cycles):
class M1(tracking.AutoTrackable):
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def __call__(self, x):
return x
root = tracking.AutoTrackable()
root.m1 = M1()
root.m2 = tracking.AutoTrackable()
root.m2.__call__ = def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])(
lambda x: x*3.0)
imported = cycle(root, cycles)
x = constant_op.constant(1.0)
self.assertTrue(callable(imported.m1))
self.assertAllEqual(root.m1(x), imported.m1(x))
# Note: `root.m2` was not callable since the `__call__` attribute was set
# on the instance and not on the class. But after a serialization cycle
# that starts to work.
self.assertTrue(callable(imported.m2))
self.assertAllEqual(root.m2.__call__(x), imported.m2(x))
# Verify that user objects without `__call__` attribute are not callable.
self.assertFalse(callable(imported))
def test_chain_callable(self, cycles):
func = def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])(
lambda x: x*3.0)
root = tracking.AutoTrackable()
root.__call__ = tracking.AutoTrackable()
root.__call__.__call__ = tracking.AutoTrackable()
root.__call__.__call__.__call__ = func
imported = cycle(root, cycles)
self.assertTrue(callable(imported))
x = constant_op.constant(1.0)
self.assertAllEqual(imported(x).numpy(), 3.0)
def test_load_in_graph_mode(self, cycles):
root = tracking.AutoTrackable()
root.v1 = variables.Variable(1., name="v_one", trainable=False)
root.v2 = variables.Variable(2., name="v_two", trainable=True)
root.f = def_function.function(
lambda x: root.v2 * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
if cycles > 1:
root = cycle(root, cycles - 1)
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(root, path)
with ops.Graph().as_default() as g:
imported = load.load(path)
var_v1 = imported.v1
self.assertFalse(var_v1.trainable)
var_v2 = imported.v2
self.assertTrue(var_v2.trainable)
output = imported.f(constant_op.constant(2.))
with monitored_session.MonitoredSession() as sess:
self.assertEqual(1.0, sess.run(var_v1))
self.assertEqual(4.0, sess.run(output))
self.assertCountEqual([var_v1, var_v2],
g.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
# load() should not add to TRAINABLE_VARIABLES. Higher levels of model
# building control retraining or frozen use of imported SavedModels.
self.assertCountEqual([],
g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
def test_load_in_func_graph(self, cycles):
root = tracking.AutoTrackable()
root.v1 = variables.Variable(1.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(
lambda x: root.v2 * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
if cycles > 1:
root = cycle(root, cycles - 1)
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(root, path)
closure = tracking.AutoTrackable()
@def_function.function
def func(x):
if not hasattr(closure, "model"):
closure.model = load.load(path)
return closure.model.f(x)
inputs = constant_op.constant(2.)
self.assertEqual(4.0, func(inputs).numpy())
def test_soft_matching(self, cycles):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)])
def func(x):
return 2 * x
root = tracking.AutoTrackable()
root.f = func
self.assertAllEqual([2], root.f(constant_op.constant([1])).numpy())
self.assertAllEqual([2, 4], root.f(constant_op.constant([1, 2])).numpy())
concrete_functions = root.f._list_all_concrete_functions_for_serialization() # pylint: disable=protected-access
self.assertEqual(1, len(concrete_functions))
imported = cycle(root, cycles)
with self.assertRaisesRegexp(ValueError, "Python inputs incompatible"):
# We cannot call the function with a constant of shape ().
imported.f(constant_op.constant(2)).numpy()
# TODO(vbardiovsky): When classes are revived with input_signatures, we
# should also check that the calls below are not generating any more
# concrete functions.
self.assertAllEqual([2, 4, 6, 8],
imported.f(constant_op.constant([1, 2, 3, 4])).numpy())
self.assertAllEqual([2, 4, 6],
imported.f(constant_op.constant([1, 2, 3])).numpy())
def test_get_concrete_function(self, cycles):
@def_function.function
def func(x, training=False):
if training:
return 2 * x
else:
return 3 * x
func.get_concrete_function(
tensor_spec.TensorSpec([None], dtypes.int32), True)
func.get_concrete_function(tensor_spec.TensorSpec([None], dtypes.float32))
root = tracking.AutoTrackable()
root.f = func
imported = cycle(root, cycles)
concrete = imported.f.get_concrete_function(
training=True, x=tensor_spec.TensorSpec([None], dtypes.int32))
self.assertAllEqual([2, 4, 6, 8],
concrete(x=constant_op.constant([1, 2, 3, 4])).numpy())
with self.assertRaisesRegexp(ValueError,
"Could not find matching function to call"):
imported.f.get_concrete_function(
tensor_spec.TensorSpec([None], dtypes.int32))
imported.f.get_concrete_function(
tensor_spec.TensorSpec([None], dtypes.int32), True)
def test_concrete_function(self, cycles):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)])
def func(x):
return 2 * x
root = tracking.AutoTrackable()
root.f = func.get_concrete_function()
self.assertAllEqual([2], root.f(constant_op.constant([1])).numpy())
self.assertAllEqual([2, 4], root.f(constant_op.constant([1, 2])).numpy())
# TODO(andresp): Fix exporting of loaded concrete functions as signatures.
imported = cycle(root, cycles, signatures={})
self.assertAllEqual([2, 4, 6, 8],
imported.f(constant_op.constant([1, 2, 3, 4])).numpy())
self.assertAllEqual([2, 4, 6],
imported.f(constant_op.constant([1, 2, 3])).numpy())
def test_concrete_function_captures(self, cycles):
class Root(module.Module):
def __init__(self):
self.v = variables.Variable(1.)
self.v1 = variables.Variable(1.)
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def use_v(self, x):
return self.v + self.v1 + 1.
root = Root()
self.assertIn(root.v.handle,
root.use_v.get_concrete_function().graph.external_captures)
for _ in range(cycles):
root = cycle(root, 1, signatures=root.use_v.get_concrete_function())
func_captures = root.use_v.get_concrete_function().graph.external_captures
self.assertLen(func_captures, 2)
self.assertTrue(any(root.v.handle is t for t in func_captures))
self.assertTrue(any(root.v1.handle is t for t in func_captures))
signature_captures = root.signatures[
"serving_default"].graph.external_captures
self.assertLen(signature_captures, 2)
self.assertTrue(any(root.v.handle is t for t in signature_captures))
self.assertTrue(any(root.v1.handle is t for t in signature_captures))
def test_concrete_function_arg_names(self, cycles):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)])
def func(x):
return 2 * x
root = tracking.AutoTrackable()
root.f = func.get_concrete_function()
self.assertAllEqual([2], root.f(constant_op.constant([1])).numpy())
# TODO(andresp): Fix exporting of loaded concrete functions as signatures.
imported = cycle(root, cycles, signatures={})
self.assertAllEqual([2, 4, 6],
imported.f(x=constant_op.constant([1, 2, 3])).numpy())
def test_concrete_function_no_signature(self, cycles):
@def_function.function
def func(x):
return 2 * x
root = tracking.AutoTrackable()
root.f = func.get_concrete_function(constant_op.constant([1]))
self.assertAllEqual([4], root.f(constant_op.constant([2])).numpy())
# TODO(andresp): Fix exporting of loaded concrete functions as signatures.
imported = cycle(root, cycles, signatures={})
self.assertAllEqual([6],
imported.f(constant_op.constant([3])).numpy())
def test_concrete_function_backprop(self, cycles):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([None], dtypes.float32)])
def func(x):
return x ** 2.
root = tracking.AutoTrackable()
root.f = func.get_concrete_function()
def _compute_gradient(function):
with backprop.GradientTape() as tape:
inp = constant_op.constant(1.)
tape.watch(inp)
output = function(inp)
return tape.gradient(output, inp)
self.assertEqual(2., _compute_gradient(root.f).numpy())
# TODO(andresp): Fix exporting of loaded concrete functions as signatures.
imported = cycle(root, cycles, signatures={})
self.assertEqual(2., _compute_gradient(imported.f).numpy())
def test_revived_concrete_function_kwargs(self, cycles):
@def_function.function
def func(x, y):
return x * (y + 1.)
root = tracking.AutoTrackable()
root.f = func.get_concrete_function(
tensor_spec.TensorSpec([], dtypes.float32),
tensor_spec.TensorSpec([], dtypes.float32))
self.assertEqual(8., root.f(y=constant_op.constant(3.),
x=constant_op.constant(2.)).numpy())
# TODO(andresp): Fix exporting of loaded concrete functions as signatures.
imported = cycle(root, cycles, signatures={})
self.assertEqual(8., imported.f(y=constant_op.constant(3.),
x=constant_op.constant(2.)).numpy())
def test_revived_concrete_function_tensorspec_kwargs(self, cycles):
@def_function.function
def func(*args):
x, y = args
return x * (y + 1.)
root = tracking.AutoTrackable()
root.f = func.get_concrete_function(
tensor_spec.TensorSpec([], dtypes.float32, name="x"),
tensor_spec.TensorSpec([], dtypes.float32, name="y"))
self.assertEqual(8., root.f(y=constant_op.constant(3.),
x=constant_op.constant(2.)).numpy())
imported = cycle(root, cycles, signatures={})
self.assertEqual(8., imported.f(y=constant_op.constant(3.),
x=constant_op.constant(2.)).numpy())
def test_concrete_function_variable_argument(self, cycles):
capture = variables.Variable(0)
@def_function.function
def func(v):
v.assign_add(1)
capture.assign_sub(1)
@def_function.function(input_signature=[
resource_variable_ops.VariableSpec(shape=[], dtype=dtypes.int32)
])
def func_with_input_signature(v):
v.assign_add(5)
capture.assign_sub(5)
return 1
vsave = variables.Variable(1)
root = tracking.AutoTrackable()
root.f = func.get_concrete_function(vsave)
root.f_sig = func_with_input_signature.get_concrete_function()
root.capture = capture
self.assertEqual(1, vsave.numpy())
root.f(vsave)
self.assertEqual(2, vsave.numpy())
self.assertEqual(-1, capture.numpy())
root.f_sig(vsave)
self.assertEqual(7, vsave.numpy())
self.assertEqual(-6, capture.numpy())
imported = cycle(root, cycles)
vload = variables.Variable(1)
imported.f(vload)
self.assertEqual(2, vload.numpy())
imported.f(v=vload)
self.assertEqual(3, vload.numpy())
self.assertEqual(-8, imported.capture.numpy())
imported.f_sig(v=vload)
self.assertEqual(8, vload.numpy())
self.assertEqual(-13, imported.capture.numpy())
self.assertEqual(-6, capture.numpy())
def test_function_and_component(self, cycles):
@def_function.function
def func(v):
return v + 1
root = tracking.AutoTrackable()
root.func = func
root.concrete_func = func.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.int32))
one = constant_op.constant(1)
self.assertEqual(2, root.func(one).numpy())
self.assertEqual(2, root.concrete_func(one).numpy())
imported = cycle(root, cycles)
self.assertEqual(2, imported.func(one).numpy())
self.assertEqual(2, imported.concrete_func(one).numpy())
def test_dict(self, cycles):
root = tracking.AutoTrackable()
root.variables = dict(a=variables.Variable(1.))
root.variables["b"] = variables.Variable(2.)
root.variables["c"] = 1
root.funcs = dict(
a=def_function.function(lambda: constant_op.constant(100.)))
root.funcs["conc"] = root.funcs["a"].get_concrete_function()
imported = cycle(root, cycles)
self.assertEqual(1., imported.variables["a"].numpy())
self.assertEqual(2., imported.variables["b"].numpy())
self.assertEqual(set(["a", "b"]), set(imported.variables.keys()))
self.assertEqual(100., imported.funcs["a"]().numpy())
self.assertEqual(100., imported.funcs["conc"]().numpy())
def test_list(self, cycles):
root = tracking.AutoTrackable()
root.variables = [variables.Variable(1.)]
root.variables.append(1)
root.variables.append(variables.Variable(3.))
imported = cycle(root, cycles)
self.assertEqual(1., imported.variables[0].numpy())
self.assertEqual(3., imported.variables[2].numpy())
self.assertIs(None, imported.variables[1])
self.assertEqual(3, len(imported.variables))
def test_tuple(self, cycles):
root = tracking.AutoTrackable()
root.variables = (variables.Variable(1.), 1, variables.Variable(3.))
imported = cycle(root, cycles)
self.assertEqual(1., imported.variables[0].numpy())
self.assertEqual(3., imported.variables[2].numpy())
self.assertIs(None, imported.variables[1])
self.assertLen(imported.variables, 3)
def test_functions_list(self, cycles):
root = tracking.AutoTrackable()
v1 = variables.Variable(1.)
root.losses = [def_function.function(lambda: math_ops.reduce_sum(v1 ** 2))]
root.variables = [v1]
@def_function.function
def _v2_loss():
if len(root.variables) == 1:
v2 = variables.Variable(2.)
root.variables.append(v2)
return math_ops.reduce_sum(root.variables[1] ** 2)
root.losses.append(_v2_loss)
self.assertAllClose([1., 4.], [loss() for loss in root.losses])
imported = cycle(root, cycles)
self.assertAllClose([1., 4.], [loss() for loss in imported.losses])
imported.variables[0].assign(3.)
imported.variables[1].assign(4.)
self.assertAllClose([9., 16.], [loss() for loss in imported.losses])
def test_captured_constant(self, cycles):
const = array_ops.zeros([100])
root = tracking.AutoTrackable()
root.f = def_function.function(lambda: const + 1.)
root.g = def_function.function(lambda: const + 2.)
self.assertAllClose(array_ops.ones([100]), root.f())
self.assertAllClose(2. * array_ops.ones([100]), root.g())
imported = cycle(root, cycles)
self.assertAllClose(array_ops.ones([100]), imported.f())
self.assertAllClose(2. * array_ops.ones([100]), imported.g())
# TODO(b/123408994): Use the public get_concrete_function.
f_concrete = imported.f._list_all_concrete_functions_for_serialization()[0]
g_concrete = imported.g._list_all_concrete_functions_for_serialization()[0]
self.assertLen(f_concrete.captured_inputs, 1)
self.assertLen(g_concrete.captured_inputs, 1)
# We should be using the same captured EagerTensor in both functions, not
# duplicating the constant.
self.assertIs(f_concrete.captured_inputs[0],
g_concrete.captured_inputs[0])
def test_functions_accessed_once(self, cycles):
class Exported(tracking.AutoTrackable):
def __init__(self):
self._counter = 0
@property
def make_func(self):
@def_function.function
def f():
return constant_op.constant(self._counter)
f.get_concrete_function() # force a trace
self._counter += 1
return f
exported = Exported()
imported = cycle(exported, cycles)
self.assertEqual(0, imported.make_func().numpy())
self.assertEqual(1, exported.make_func().numpy())
def test_overwritten_signatures_error(self, cycles):
exported = tracking.AutoTrackable()
exported.f = def_function.function(lambda: constant_op.constant(1.))
imported = cycle(
exported, cycles,
signatures={"key": exported.f.get_concrete_function()})
self.assertEqual(1., imported.signatures["key"]()["output_0"].numpy())
imported.signatures = {"key1": imported.signatures["key"]}
with self.assertRaisesRegexp(ValueError, "signatures"):
save.save(imported, tempfile.mkdtemp(prefix=self.get_temp_dir()))
def test_signature_loading(self, cycles):
class Exported(tracking.AutoTrackable):
def __init__(self):
self.v = variables.Variable(3.)
@def_function.function
def do(self, x):
return self.v * x
exported = Exported()
imported = cycle(
exported,
cycles=1,
signatures=exported.do.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32)))
for _ in range(cycles - 1):
imported = cycle(imported, cycles=1, signatures=imported.signatures)
self.assertEqual(["serving_default"], list(imported.signatures.keys()))
imported_function = imported.signatures["serving_default"]
two = constant_op.constant(2.)
self.assertEqual(6., imported_function(x=two)["output_0"].numpy())
imported.v.assign(4.)
self.assertEqual(8., imported_function(x=two)["output_0"].numpy())
self.assertEqual(8., imported_function(two)["output_0"].numpy())
with self.assertRaises(TypeError):
# The signatures mapping is immutable
imported.signatures["random_key"] = 3
def test_multiple_argument_signatures_no_positional(self, cycles):
class Exported(tracking.AutoTrackable):
@def_function.function
def do(self, x, y):
return x + y
exported = Exported()
imported = cycle(
exported, cycles=1, signatures=exported.do.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32)))
for _ in range(cycles - 1):
imported = cycle(imported, cycles=1, signatures=imported.signatures)
with self.assertRaises(TypeError):
imported.signatures["serving_default"](
constant_op.constant(1.),
y=constant_op.constant(2.))
self.assertEqual(
{"output_0": 3.},
self.evaluate(imported.signatures["serving_default"](
x=constant_op.constant(1.),
y=constant_op.constant(2.))))
def _make_model_with_tables(self):
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table1_initializer = lookup_ops.KeyValueTensorInitializer(keys, values)
table1 = lookup_ops.HashTable(table1_initializer, default_val)
table2_file = self._make_asset("test\nfoo\nbrain\n")
table2_initializer = lookup_ops.TextFileIdTableInitializer(table2_file)
table2 = lookup_ops.HashTable(table2_initializer, default_val)
def _make_lookup_function(table):
signature = [tensor_spec.TensorSpec(None, dtypes.string)]
return def_function.function(input_signature=signature)(
lambda x: table.lookup(x)) # pylint: disable=unnecessary-lambda
root = tracking.AutoTrackable()
root.table1 = table1
root.lookup1 = _make_lookup_function(table1)
root.table2 = table2
root.lookup2 = _make_lookup_function(table2)
return root
def test_table(self, cycles):
root = self._make_model_with_tables()
imported = cycle(root, cycles, signatures={})
keys = constant_op.constant(["brain", "test", "foo", "surgery"])
self.assertAllEqual([0, -1, -1, 2], imported.lookup1(keys).numpy())
self.assertAllEqual([2, 0, 1, -1], imported.lookup2(keys).numpy())
def test_table_collections_untouched_eager(self, cycles):
def _gather_nonempty_collections():
graph = ops.get_default_graph()
gathered = {}
for collection in graph.collections:
collection_contents = graph.get_collection(collection)
if collection_contents:
gathered[collection] = collection_contents
return gathered
root = self._make_model_with_tables()
# Warm up collections to ignore those that don't expand every iteration,
# e.g. the __varscope collection.
cycle(root, 1)
original_collections = _gather_nonempty_collections()
cycle(root, cycles)
self.assertEqual(original_collections, _gather_nonempty_collections())
def test_table_in_graph(self, cycles):
root = self._make_model_with_tables()
if cycles > 1:
root = cycle(root, cycles - 1)
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(root, path)
imported = cycle(root, 1)
with ops.Graph().as_default():
imported = load.load(path)
keys = constant_op.constant(["brain", "test", "foo", "surgery"])
output1 = imported.lookup1(keys)
output2 = imported.lookup2(keys)
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual([0, -1, -1, 2], sess.run(output1))
self.assertAllEqual([2, 0, 1, -1], sess.run(output2))
def test_perserve_argspec(self, cycles):
def f(a, b, c): # pylint: disable=unused-argument
return None
original_fullargspec = tf_inspect.getfullargspec(f)
root = tracking.AutoTrackable()
root.f = def_function.function(f)
imported = cycle(root, cycles)
restored_fullargspec = tf_inspect.getfullargspec(imported.f)
self.assertEqual(original_fullargspec, restored_fullargspec)
def test_canonicalize_inputs(self, cycles):
@def_function.function(autograph=False)
def func(a=1, b=2, c=3, training=True):
if training:
return [a, b, c, training]
else:
return [c, b, a, training]
# TODO(b/123501567): Work-around to trigger generic traces of a function
# with extra non tensor args.
signature = 3*[tensor_spec.TensorSpec(None, dtypes.float32)]
@def_function.function(input_signature=signature)
def trigger(a, b, c):
func(a, b, c, True)
func(a, b, c, False)
trigger.get_concrete_function()
root = tracking.AutoTrackable()
root.f = func
root = cycle(root, cycles)
self.assertAllEqual(root.f(), [1.0, 2.0, 3.0, True])
self.assertAllEqual(root.f(-1.0, training=False), [3.0, 2.0, -1.0, False])
with self.assertRaisesRegexp(ValueError,
"Could not find matching function"):
root.f(["hello", 1.0])
def test_prefer_specific_trace(self, cycles):
@def_function.function(autograph=False)
def func(a):
if isinstance(a, int):
return a
else:
return a + 1
self.assertAllEqual(2, func(2).numpy())
self.assertAllEqual(3, func(constant_op.constant(2)).numpy())
root = tracking.AutoTrackable()
root.f = func
root = cycle(root, cycles)
self.assertAllEqual(2, root.f(2).numpy())
self.assertAllEqual(4, root.f(3).numpy())
self.assertAllEqual(3, root.f(constant_op.constant(2)).numpy())
self.assertAllEqual(4, root.f(constant_op.constant(3)).numpy())
def test_partial(self, cycles):
def f(x, y):
return x + y
func = def_function.function(
functools.partial(f, x=array_ops.zeros([1]), y=array_ops.ones([1])))
root = tracking.AutoTrackable()
root.f = func
self.assertAllEqual(root.f(), [1.0])
root = cycle(root, cycles)
self.assertAllEqual(root.f(), [1.0])
def test_partial_with_non_tensor_defaults(self, cycles):
def f(x, y=3):
return x + y
func = def_function.function(functools.partial(f, y=5))
root = tracking.AutoTrackable()
root.f = func
self.assertAllEqual(root.f(1), 6)
root = cycle(root, cycles)
self.assertAllEqual(root.f(1), 6)
def test_partial_with_positional(self, cycles):
def f(x, y):
return x + y
func = def_function.function(functools.partial(f, constant_op.constant(5)))
root = tracking.AutoTrackable()
root.f = func
self.assertAllEqual(root.f(1), 6)
root = cycle(root, cycles)
self.assertAllEqual(root.f(1), 6)
def test_partial_with_positional_captured_tensors(self, cycles):
def f(x, y):
return x + y
tensor = constant_op.constant(5) + constant_op.constant(7)
func = def_function.function(functools.partial(f, tensor))
root = tracking.AutoTrackable()
root.f = func
self.assertAllEqual(root.f(1), 13)
root = cycle(root, cycles)
self.assertAllEqual(root.f(1), 13)
def test_partial_keyword_hiding_default(self, cycles):
def f(x=3, training=True, y=7):
if training:
return x + y
else:
return x + y + 2
func = def_function.function(functools.partial(f, y=6))
root = tracking.AutoTrackable()
root.f = func
self.assertEqual(root.f().numpy(), 9)
self.assertEqual(root.f(training=False).numpy(), 11)
root = cycle(root, cycles)
self.assertEqual(root.f().numpy(), 9)
self.assertEqual(root.f(training=False).numpy(), 11)
def test_partial_with_kwargs(self, cycles):
def f(a, b, *args, **kwargs):
args_sum = sum(args)
return a + b + kwargs["some_tensor"] * kwargs["learning_rate"] + args_sum
constant_tensor = constant_op.constant(10)
func = def_function.function(
functools.partial(
f, 7, 1, 2, learning_rate=3, some_tensor=constant_tensor))
root = tracking.AutoTrackable()
root.f = func
self.assertEqual(root.f(constant_op.constant(4)).numpy(), 44)
root = cycle(root, cycles)
self.assertEqual(root.f(constant_op.constant(5)).numpy(), 45)
def test_partial_bind_only_first_argument(self, cycles):
if sys.version_info[0] < 3:
self.skipTest("Test is only valid in python3. Only then we get some more "
"advanced inspection of partials where this is allowed.")
def f(x, y):
return x + y
partial_func = functools.partial(f, x=5)
tf_func = def_function.function(partial_func)
root = tracking.AutoTrackable()
root.f = tf_func
self.assertAllEqual(root.f(y=constant_op.constant(7)), 12)
root = cycle(root, cycles)
self.assertAllEqual(root.f(y=constant_op.constant(9)), 14)
def test_partial_with_passed_fn_as_default(self, cycles):
def f(x, y):
return x(3) + y
def my_func(a):
return 2 * a
func = def_function.function(functools.partial(f, my_func))
root = tracking.AutoTrackable()
root.f = func
self.assertEqual(root.f(constant_op.constant(3)).numpy(), 9)
root = cycle(root, cycles)
self.assertEqual(root.f(constant_op.constant(3)).numpy(), 9)
def test_partial_with_input_signature(self, cycles):
def full_function(a, b, c=3.0):
return a, b, c
partial = functools.partial(full_function, 1, c=4)
self.assertAllEqual((1, 2.0, 4), partial(2.0))
signature = [tensor_spec.TensorSpec([], dtypes.float32)]
func = def_function.function(partial, input_signature=signature)
root = tracking.AutoTrackable()
root.f = func
a, b, c = root.f(2.0)
self.assertAllEqual([a.numpy(), b.numpy(), c.numpy()], (1, 2.0, 4))
root = cycle(root, cycles)
a, b, c = root.f(3.0)
self.assertAllEqual([a.numpy(), b.numpy(), c.numpy()], (1, 3.0, 4))
def test_convert_to_input_signature(self, cycles):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)])
def func(x):
return x
root = tracking.AutoTrackable()
root.f = func
root = cycle(root, cycles)
self.assertEqual([2], root.f([2]).numpy())
def test_named_tuple(self, cycles):
class NamedTupleType(collections.namedtuple("NamedTupleType", ["a", "b"])):
pass
@def_function.function
def f(x):
return x.a + x.b
f.get_concrete_function(
NamedTupleType(
a=tensor_spec.TensorSpec(None, dtypes.float32, name="a"),
b=tensor_spec.TensorSpec(None, dtypes.float32, name="b")))
obj = tracking.AutoTrackable()
obj.__call__ = f
if sys.version_info.major == 3 and sys.version_info.minor < 5:
# TODO(allenl): figure out why this doesn't work in Python3.4
self.skipTest("Not working in Python 3.4")
imported = cycle(obj, cycles)
self.assertAllClose(3.,
imported(NamedTupleType(a=constant_op.constant(1.),
b=constant_op.constant(2.))))
def test_extra_args(self, cycles):
@def_function.function
def f(x):
return math_ops.add(x["a"], 1.)
# Trigger a trace.
f({"a": constant_op.constant(2.0)})
obj = tracking.AutoTrackable()
obj.__call__ = f
imported = cycle(obj, cycles)
self.assertEqual(4.0, imported({"a": 3.0}).numpy())
with self.assertRaisesRegexp(ValueError,
"Could not find matching function to call"):
imported({"a": 2.0, "b": 3.0})
def test_shapes_available(self, cycles):
@def_function.function(input_signature=[
tensor_spec.TensorSpec([None, 3], dtypes.int32),
tensor_spec.TensorSpec([None, 2], dtypes.int32)
])
def func(x, y):
return array_ops.concat([x, y], axis=1)
root = tracking.AutoTrackable()
root.f = func
root = cycle(root, cycles)
imported_graph = root.f.get_concrete_function().graph
input_x, input_y = imported_graph.inputs
self.assertEqual([None, 3], input_x.shape.as_list())
self.assertEqual([None, 2], input_y.shape.as_list())
output, = imported_graph.outputs
self.assertEqual([None, 5], output.shape.as_list())
signature = root.signatures["serving_default"]
self.assertEqual(
[None, 3], signature.inputs[0].shape.as_list())
self.assertEqual(
[None, 2], signature.inputs[1].shape.as_list())
self.assertEqual(
[None, 5], signature.outputs[0].shape.as_list())
def test_variables_destroyed(self, cycles):
v1 = variables.Variable(1.)
weak_v1 = weakref.ref(v1)
root = util.Checkpoint(v=v1)
root = cycle(root, cycles)
del v1
self.assertIsNone(weak_v1())
weak_v2 = weakref.ref(root.v)
del root
self.assertIsNone(weak_v2())
def test_variable_attributes_preserved(self, cycles):
v = variables.Variable(
1.,
trainable=False,
synchronization=variables.VariableSynchronization.NONE,
aggregation=variables.VariableAggregation.ONLY_FIRST_REPLICA)
self.assertEqual(variables.VariableSynchronization.NONE,
v.synchronization)
self.assertEqual(variables.VariableAggregation.ONLY_FIRST_REPLICA,
v.aggregation)
root = tracking.AutoTrackable()
root.v = v
root = cycle(root, cycles)
self.assertEqual(False, root.v.trainable)
self.assertEqual(variables.VariableSynchronization.NONE,
root.v.synchronization)
self.assertEqual(variables.VariableAggregation.ONLY_FIRST_REPLICA,
root.v.aggregation)
def test_captured_dataset(self, cycles):
class HasDataset(module.Module):
def __init__(self):
super(HasDataset, self).__init__()
self.dataset = (
dataset_ops.Dataset.range(5)
.map(lambda x: x ** 2))
@def_function.function
def __call__(self, x):
current_sum = array_ops.zeros([], dtype=dtypes.int64)
for element in self.dataset:
current_sum += x * element
return current_sum
root = HasDataset()
self.assertEqual(
3 * (1 + 4 + 9 + 16),
root(constant_op.constant(3, dtype=dtypes.int64)).numpy())
root = cycle(root, cycles)
self.assertEqual(
3 * (1 + 4 + 9 + 16),
root(constant_op.constant(3, dtype=dtypes.int64)).numpy())
def test_tuple_signature(self, cycles):
root = util.Checkpoint()
root.f = def_function.function(
lambda: (array_ops.ones([]), array_ops.zeros([])),
input_signature=())
for _ in range(cycles):
root = cycle(root, 1, signatures=root.f)
self.assertEqual(({"output_0": 1., "output_1": 0.}),
self.evaluate(root.signatures["serving_default"]()))
def test_model_with_custom_function_attached(self, cycles):
root = util.Checkpoint(model=sequential.Sequential([core.Dense(2)]))
@def_function.function
def _use_sequential(x):
return root.model.call(x)
root.model.traced_call = _use_sequential
original = root.model.traced_call(array_ops.zeros([1, 1])).numpy()
root = cycle(root, cycles)
self.assertAllEqual(
original,
root.model.traced_call(array_ops.zeros([1, 1])).numpy())
def test_version_info(self, cycles):
root = util.Checkpoint()
root = cycle(root, cycles)
self.assertEqual(versions.__version__, root.tensorflow_version)
self.assertEqual(versions.__git_version__, root.tensorflow_git_version)
def test_load_grad_save(self, cycles):
root = util.Checkpoint()
root.v = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v * x)
root.g = def_function.function(root.f)
for _ in range(cycles):
with backprop.GradientTape() as tape:
inp = constant_op.constant(2.)
tape.watch(inp)
output = root.g(inp)
self.assertAllClose(4., output)
self.assertAllClose(2., tape.gradient(output, inp))
root = cycle(root, 1)
def test_destroy_resource(self, cycles):
def get_handle():
return resource_variable_ops.var_handle_op(
shape=tensor_shape.as_shape([]),
dtype=dtypes.float32,
shared_name="my_var_name",
name="my_var",
container="my_container")
class MyResourceDeleter(tracking.CapturableResourceDeleter):
def destroy_resource(self):
handle = get_handle()
resource_variable_ops.destroy_resource_op(
handle, ignore_lookup_error=True)
class MyResource(tracking.TrackableResource):
def __init__(self):
# Set the resource deleter, so when the resource object goes out of
# scope it will be deleted automatically.
super(MyResource, self).__init__(deleter=MyResourceDeleter())
def _create_resource(self):
return get_handle()
def _initialize(self):
resource_variable_ops.assign_variable_op(
self.resource_handle, 1.0, name="assign")
class MyModel(tracking.AutoTrackable):
def __init__(self):
super(MyModel, self).__init__()
self.resource = MyResource()
@def_function.function(input_signature=[])
def increase(self):
handle = self.resource.resource_handle
resource_variable_ops.assign_add_variable_op(
handle, 10.0, name="assign_add")
return resource_variable_ops.read_variable_op(handle, dtypes.float32)
root = MyModel()
imported = cycle(root, cycles)
self.assertEqual(11, imported.increase().numpy()) # Create the resource.
handle = imported.resource.resource_handle
# Delete the imported SavedModel. Since we explicitly set the deleter, it
# should destroy the resource automatically.
del imported
# Try to destroy the resource again, should fail.
with self.assertRaisesRegexp(errors.NotFoundError,
r"Resource .* does not exist."):
resource_variable_ops.destroy_resource_op(
handle, ignore_lookup_error=False)
def test_function_called_as_operation(self, cycles):
@framework_function.Defun(dtypes.float32)
def inner(x):
return x + 1.
@def_function.function(
input_signature=[tensor_spec.TensorSpec([], dtypes.float32)])
def outer(x):
return inner(x)
root = module.Module()
root.f = outer
imported = cycle(root, cycles)
self.assertAllClose(2., imported.f(constant_op.constant(1.)))
def test_ragged(self, cycles):
@def_function.function(input_signature=[
ragged_tensor.RaggedTensorSpec(shape=[None, None], dtype=dtypes.int32)
])
def f(x):
return x + 1
obj = tracking.AutoTrackable()
obj.f = f
imported1 = cycle(obj, cycles, signatures={})
rt = ragged_factory_ops.constant([[1, 2], [3]])
self.assertAllEqual(imported1.f(rt), [[2, 3], [4]])
imported2 = cycle(obj, cycles)
rt = ragged_factory_ops.constant([[1, 2], [3]])
self.assertAllEqual(imported2.f(rt), [[2, 3], [4]])
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
@parameterized.named_parameters(
dict(testcase_name="ReloadOnce", cycles=1),
dict(testcase_name="ReloadTwice", cycles=2),
dict(testcase_name="ReloadThrice", cycles=3))
class KerasLoadTest(test.TestCase, parameterized.TestCase):
def test_dense_features_layer(self, cycles):
columns = [
feature_column_lib.numeric_column("x"),
feature_column_lib.numeric_column("y")
]
layer = feature_column_lib.DenseFeatures(columns)
model = sequential.Sequential([layer])
model_input = {"x": constant_op.constant([[1.]]),
"y": constant_op.constant([[2.]])}
self.assertAllClose([[1., 2.]], model.predict(model_input, steps=1))
loaded = cycle(model, cycles)
output, = loaded._default_save_signature(model_input).values()
self.assertAllClose([[1., 2.]], output)
signature_output, = loaded.signatures["serving_default"](
**model_input).values()
self.assertAllClose([[1., 2.]], signature_output)
def test_dense_features_layer_fit(self, cycles):
columns = [feature_column_lib.numeric_column("x")]
model = sequential.Sequential(
[feature_column_lib.DenseFeatures(columns),
core.Dense(1)])
model_input = {"x": constant_op.constant([[1.]])}
model.compile(optimizer="adam", loss="mse", run_eagerly=True,
experimental_run_tf_function=True)
model.fit(model_input, constant_op.constant([[3.]]))
loaded = cycle(model, cycles)
loaded._default_save_signature(model_input)
loaded.signatures["serving_default"](**model_input)
def test_multi_output_layer(self, cycles):
inp = input_layer.Input(name="inp", shape=(None,), dtype=dtypes.float32)
class _MultiOutput(base_layer.Layer):
def call(self, x):
return x + 1., x + 2.
out = _MultiOutput(name="out")(inp)
model = training_lib.Model(inp, out)
loaded = cycle(model, cycles)
self.assertAllClose(
dict(out=2., out_1=3.),
loaded.signatures["serving_default"](constant_op.constant(1.)))
def test_functional_model_with_conv(self, cycles):
x = input_layer.Input(name="x", shape=(None, None, 3), dtype=dtypes.float32)
conved = convolutional.Conv2D(filters=3, kernel_size=3, dilation_rate=2)(x)
model = training_lib.Model([x], conved)
model_input = array_ops.ones((1, 10, 10, 3))
initial_output = model.predict([model_input])
model = cycle(model, cycles)
self.assertAllClose(
[initial_output],
list(model.signatures["serving_default"](model_input).values()))
class SingleCycleTests(test.TestCase, parameterized.TestCase):
def test_load_with_tags(self):
root = tracking.AutoTrackable()
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(root, path)
with self.assertRaises(ValueError):
load.load(path, tags=[tag_constants.EVAL])
load.load(path, tags=[tag_constants.SERVING])
load.load(path, tags=tag_constants.SERVING)
load.load(path, tags=set([tag_constants.SERVING]))
def test_docstring_examples(self):
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
exported = util.Checkpoint(v=variables.Variable(3.))
exported.f = def_function.function(
lambda x: exported.v * x,
input_signature=[
tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32)])
save.save(exported, path)
imported = load.load(path)
self.assertEqual(3., imported.v.numpy())
self.assertEqual(6., imported.f(x=constant_op.constant(2.)).numpy())
save.save(exported, path, exported.f.get_concrete_function())
imported = load.load(path)
f = imported.signatures["serving_default"]
self.assertAllEqual(
[[-3.]],
f(x=constant_op.constant([[-1.]]))["output_0"].numpy())
def test_object_with_extra_dependencies(self):
class Extra(tracking.AutoTrackable):
def _list_extra_dependencies_for_serialization(self, cache):
if self not in cache:
cache[self] = {"a": variables.Variable(5.)}
return cache[self]
root = Extra()
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(root, path)
imported = load.load(path)
self.assertEqual(5, self.evaluate(imported.a))
root.a = variables.Variable(3.)
with self.assertRaisesRegexp(
ValueError,
"object has an attribute named a, which is reserved."):
save.save(root, path)
if __name__ == "__main__":
test.main()
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.notifications.objects import base
from nova.objects import base as nova_base
from nova.objects import fields
@nova_base.NovaObjectRegistry.register_notification
class InstancePayload(base.NotificationPayloadBase):
SCHEMA = {
'uuid': ('instance', 'uuid'),
'user_id': ('instance', 'user_id'),
'tenant_id': ('instance', 'project_id'),
'reservation_id': ('instance', 'reservation_id'),
'display_name': ('instance', 'display_name'),
'host_name': ('instance', 'hostname'),
'host': ('instance', 'host'),
'node': ('instance', 'node'),
'os_type': ('instance', 'os_type'),
'architecture': ('instance', 'architecture'),
'availability_zone': ('instance', 'availability_zone'),
'image_uuid': ('instance', 'image_ref'),
'kernel_id': ('instance', 'kernel_id'),
'ramdisk_id': ('instance', 'ramdisk_id'),
'created_at': ('instance', 'created_at'),
'launched_at': ('instance', 'launched_at'),
'terminated_at': ('instance', 'terminated_at'),
'deleted_at': ('instance', 'deleted_at'),
'state': ('instance', 'vm_state'),
'power_state': ('instance', 'power_state'),
'task_state': ('instance', 'task_state'),
'progress': ('instance', 'progress'),
'metadata': ('instance', 'metadata'),
}
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'uuid': fields.UUIDField(),
'user_id': fields.StringField(nullable=True),
'tenant_id': fields.StringField(nullable=True),
'reservation_id': fields.StringField(nullable=True),
'display_name': fields.StringField(nullable=True),
'host_name': fields.StringField(nullable=True),
'host': fields.StringField(nullable=True),
'node': fields.StringField(nullable=True),
'os_type': fields.StringField(nullable=True),
'architecture': fields.StringField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
'flavor': fields.ObjectField('FlavorPayload'),
'image_uuid': fields.StringField(nullable=True),
'kernel_id': fields.StringField(nullable=True),
'ramdisk_id': fields.StringField(nullable=True),
'created_at': fields.DateTimeField(nullable=True),
'launched_at': fields.DateTimeField(nullable=True),
'terminated_at': fields.DateTimeField(nullable=True),
'deleted_at': fields.DateTimeField(nullable=True),
'state': fields.InstanceStateField(nullable=True),
'power_state': fields.InstancePowerStateField(nullable=True),
'task_state': fields.InstanceTaskStateField(nullable=True),
'progress': fields.IntegerField(nullable=True),
'ip_addresses': fields.ListOfObjectsField('IpPayload'),
'metadata': fields.DictOfStringsField(),
}
def __init__(self, instance, **kwargs):
super(InstancePayload, self).__init__(**kwargs)
self.populate_schema(instance=instance)
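# Illustrative sketch of how the SCHEMA-driven population behaves: each
# SCHEMA entry maps a payload field to an (object name, attribute) pair,
# so constructing the payload is roughly equivalent to:
#
#     payload = InstancePayload(instance)
#     # payload.uuid      ~ instance.uuid
#     # payload.tenant_id ~ instance.project_id
#     # ...and so on for every entry in SCHEMA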
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionPayload(InstancePayload):
# No SCHEMA as all the additional fields are calculated
VERSION = '1.0'
fields = {
'fault': fields.ObjectField('ExceptionPayload', nullable=True),
}
def __init__(self, instance, fault, ip_addresses, flavor, **kwargs):
super(InstanceActionPayload, self).__init__(
instance=instance,
fault=fault,
ip_addresses=ip_addresses,
flavor=flavor,
**kwargs)
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionVolumeSwapPayload(InstanceActionPayload):
# No SCHEMA as all the additional fields are calculated
VERSION = '1.0'
fields = {
'old_volume_id': fields.UUIDField(),
'new_volume_id': fields.UUIDField(),
}
def __init__(self, instance, fault, ip_addresses, flavor,
old_volume_id, new_volume_id):
super(InstanceActionVolumeSwapPayload, self).__init__(
instance=instance,
fault=fault,
ip_addresses=ip_addresses,
flavor=flavor,
old_volume_id=old_volume_id,
new_volume_id=new_volume_id)
@nova_base.NovaObjectRegistry.register_notification
class InstanceUpdatePayload(InstancePayload):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'state_update': fields.ObjectField('InstanceStateUpdatePayload'),
'audit_period': fields.ObjectField('AuditPeriodPayload'),
'bandwidth': fields.ListOfObjectsField('BandwidthPayload'),
'old_display_name': fields.StringField(nullable=True)
}
def __init__(self, instance, flavor, ip_addresses, state_update,
audit_period, bandwidth, old_display_name):
super(InstanceUpdatePayload, self).__init__(
instance=instance,
flavor=flavor,
ip_addresses=ip_addresses,
state_update=state_update,
audit_period=audit_period,
bandwidth=bandwidth,
old_display_name=old_display_name)
@nova_base.NovaObjectRegistry.register_notification
class IpPayload(base.NotificationPayloadBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'label': fields.StringField(),
'mac': fields.MACAddressField(),
'meta': fields.DictOfStringsField(),
'port_uuid': fields.UUIDField(nullable=True),
'version': fields.IntegerField(),
'address': fields.IPV4AndV6AddressField(),
'device_name': fields.StringField(nullable=True)
}
@classmethod
def from_network_info(cls, network_info):
"""Returns a list of IpPayload object based on the passed
network_info.
"""
ips = []
if network_info is not None:
for vif in network_info:
for ip in vif.fixed_ips():
ips.append(cls(
label=vif["network"]["label"],
mac=vif["address"],
meta=vif["meta"],
port_uuid=vif["id"],
version=ip["version"],
address=ip["address"],
device_name=vif["devname"]))
return ips
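# Illustrative usage sketch (the caller shown here is hypothetical): given
# the network_info list for an instance, the ip_addresses field of an action
# payload could be built as:
#
#     ip_payloads = IpPayload.from_network_info(network_info)
#     payload = InstanceActionPayload(instance, fault=None,
#                                     ip_addresses=ip_payloads,
#                                     flavor=FlavorPayload(instance))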
@nova_base.NovaObjectRegistry.register_notification
class FlavorPayload(base.NotificationPayloadBase):
# Version 1.0: Initial version
VERSION = '1.0'
SCHEMA = {
'flavorid': ('flavor', 'flavorid'),
'memory_mb': ('flavor', 'memory_mb'),
'vcpus': ('flavor', 'vcpus'),
'root_gb': ('flavor', 'root_gb'),
'ephemeral_gb': ('flavor', 'ephemeral_gb'),
}
fields = {
'flavorid': fields.StringField(nullable=True),
'memory_mb': fields.IntegerField(nullable=True),
'vcpus': fields.IntegerField(nullable=True),
'root_gb': fields.IntegerField(nullable=True),
'ephemeral_gb': fields.IntegerField(nullable=True),
}
def __init__(self, instance, **kwargs):
super(FlavorPayload, self).__init__(**kwargs)
self.populate_schema(instance=instance, flavor=instance.flavor)
@nova_base.NovaObjectRegistry.register_notification
class BandwidthPayload(base.NotificationPayloadBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'network_name': fields.StringField(),
'in_bytes': fields.IntegerField(),
'out_bytes': fields.IntegerField(),
}
@nova_base.NovaObjectRegistry.register_notification
class AuditPeriodPayload(base.NotificationPayloadBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'audit_period_beginning': fields.DateTimeField(),
'audit_period_ending': fields.DateTimeField(),
}
@nova_base.NovaObjectRegistry.register_notification
class InstanceStateUpdatePayload(base.NotificationPayloadBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'old_state': fields.StringField(nullable=True),
'state': fields.StringField(nullable=True),
'old_task_state': fields.StringField(nullable=True),
'new_task_state': fields.StringField(nullable=True),
}
@base.notification_sample('instance-delete-start.json')
@base.notification_sample('instance-delete-end.json')
@base.notification_sample('instance-pause-start.json')
@base.notification_sample('instance-pause-end.json')
# @base.notification_sample('instance-unpause-start.json')
# @base.notification_sample('instance-unpause-end.json')
@base.notification_sample('instance-resize-start.json')
@base.notification_sample('instance-resize-end.json')
@base.notification_sample('instance-suspend-start.json')
@base.notification_sample('instance-suspend-end.json')
@base.notification_sample('instance-power_on-start.json')
@base.notification_sample('instance-power_on-end.json')
# @base.notification_sample('instance-power_off-start.json')
# @base.notification_sample('instance-power_off-end.json')
# @base.notification_sample('instance-reboot-start.json')
# @base.notification_sample('instance-reboot-end.json')
# @base.notification_sample('instance-shutdown-start.json')
# @base.notification_sample('instance-shutdown-end.json')
# @base.notification_sample('instance-snapshot-start.json')
# @base.notification_sample('instance-snapshot-end.json')
# @base.notification_sample('instance-add_fixed_ip-start.json')
# @base.notification_sample('instance-add_fixed_ip-end.json')
@base.notification_sample('instance-shelve-start.json')
@base.notification_sample('instance-shelve-end.json')
# @base.notification_sample('instance-resume-start.json')
# @base.notification_sample('instance-resume-end.json')
@base.notification_sample('instance-restore-start.json')
@base.notification_sample('instance-restore-end.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionNotification(base.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('InstanceActionPayload')
}
@base.notification_sample('instance-update.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceUpdateNotification(base.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('InstanceUpdatePayload')
}
@base.notification_sample('instance-volume_swap-start.json')
@base.notification_sample('instance-volume_swap-end.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionVolumeSwapNotification(base.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('InstanceActionVolumeSwapPayload')
}
|
|
# -*- coding: utf-8 -*-
import sys
import timeit
import elite
start = timeit.default_timer()
mydb = elite.db()
##################
# options
##################
myStartSystem = "ltt 9810"
maxSunDistance = 1500
sellDist = 160
maxHops = 6
maxJumpDistance = 12.71
maxDeep = 18
minDeep = 16
useSlowMod = True # re-route the best route using all available routes to optimize the result
useSlowModOnStart = False # extremely slow: at deep 10 this means 8161314 routes and ~12 GB RAM usage, but gives optimal results
# todo: elite.route.calcAllRoutesFromSystem is not optimal
if maxDeep > 19 and sys.maxsize <= 2147483647 and (useSlowMod == True or useSlowModOnStart == True):
print("warning: on 32bit systems no slowmod >deep 19 avalibel" )
useSlowMod = False
useSlowModOnStart = False
if minDeep >= maxDeep:
minDeep = maxDeep
if useSlowModOnStart == True:
useSlowMod = False
def sellItems(system, route, muted=False):
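# Marks rares bought earlier on the route as sold once the current system is
# at least sellDist light-years away from the system they were bought in;
# unless muted, it also prints which items can be sold here.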
myTitle = True
# print(" -->",indexstart)
for oldsystem in route:
if oldsystem._sellDone:
continue
dist = mydb.getDistanceFromTo(system.systemID, oldsystem.systemID)
if dist >= sellDist:
if oldsystem._raresInSystem:
oldsystem._sellDone = True
if muted == False:
if myTitle:
print("\t__Sell__")
myTitle = False
for item in oldsystem._raresInSystem:
print("\t\tItem: %s" % (item["Name"]))
myrares = elite.rares(mydb)
myroute = elite.route(mydb)
systemList = myrares.getRaresListFitered(maxSunDistance)
print("calc with %d rare systems" % len(systemList))
myRaresRoute = []
startSystem = elite.route(mydb, None, maxDeep, maxJumpDistance, maxHops)
startSystem._availableSystemList = systemList
startSystem.systemID = mydb.getSystemIDbyName(myStartSystem) # start system
startSystem.initSystem = startSystem
startSystem.calcAllRoutesFromSystem(useSlowModOnStart)
def getBestRouteFromResult(startSystem , useSlowMod=True, maxDeep=None):
if not maxDeep :maxDeep = startSystem.calcRoutingDeep()
elif maxDeep > startSystem.calcRoutingDeep():
maxDeep = startSystem.calcRoutingDeep()
allRoutesTop = []
bestSellCount = None
bestOption = None
print("max deep" , maxDeep)
print("hops", startSystem.getMinHops(maxDeep))
print("star dist", startSystem.getMinStarDist(maxDeep))
print("dist", startSystem.getMinDistFromBest(maxDeep))
print("routes calculated", startSystem.calcRouteSum())
allRoutes = startSystem.getAllRoutes(maxDeep)
print("routes with maxdeep", len(allRoutes))
while len(allRoutesTop) == 0 :
if bestSellCount: # only true on the second pass: accept the best route even if it cannot sell every item
bestOption = True
for endsystem in allRoutes:
route = endsystem.getSystemsFromRoute()
for system in route: # reset sell status
system._sellDone = None
system._raresInSystem = None
for system in route:
if system._raresInSystem == None: system._raresInSystem = myrares.getRaresInSystem(system.systemID)
sellItems(system, route, True)
sellItems(route[0], route, True) # sell on start== end system
notSellCount = 0
for oldsystem in route:
if oldsystem._sellDone == None and oldsystem._raresInSystem:
notSellCount += 1
if notSellCount == 0 or (bestOption == True and notSellCount == bestSellCount):
# top route can sell all items
allRoutesTop.append(route)
bestSellCount = notSellCount
elif bestSellCount == None or bestSellCount > notSellCount:
bestSellCount = notSellCount
print("not sell count", bestSellCount)
print("top routes", len(allRoutesTop))
bestDist = None
shortestStarDist = None
backToStartDist = None
bestBackToStartDist = None
bestRating = None
for route in allRoutesTop:
totalDist = 0
totalSunDist = 0
syslist = ""
for system in route:
if system._hopsFromBefore : totalDist += system._dist
if system.starDist : totalSunDist += system.starDist
syslist += "%s" % system.systemID
syslist += ", "
# print(totalHops, totalSunDist, system.systemName ,syslist)
backToStartDist = mydb.getDistanceFromTo(system.systemID, route[0].systemID)
totalDist += backToStartDist # add back to start distance
#######################
# # rating calculation
#######################
rating = backToStartDist * 2 + totalDist / len(route) + totalSunDist * 1.5 / len(route)
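# Lower rating is better: the jump back to the start system is weighted
# double, while total route length and accumulated star distance (weighted
# 1.5x) are averaged over the number of systems in the route.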
# print(backToStartDist, totalDist, totalSunDist , rating)
if not shortestStarDist or shortestStarDist > totalSunDist:
shortestStarDist = totalSunDist
if not bestBackToStartDist or bestBackToStartDist > backToStartDist:
bestBackToStartDist = backToStartDist
if not bestDist or bestDist > totalDist:
bestDist = totalDist
if not bestRating or bestRating > rating:
usedRoute = "used route: starDist %dls len %dly backtostart %dly" % (totalSunDist, totalDist, backToStartDist)
bestRating = rating
bestRoute = route
print("best rating ", bestRating)
if bestRoute:
dist = bestRoute[len(bestRoute) - 1].getStardistanceFromRoute()
print("best star dist", shortestStarDist , "used route", dist, dist / len(bestRoute))
else:
print("best star dist", shortestStarDist , "noting used")
print("best dist", bestDist)
print("best bestBackToStartDist", bestBackToStartDist)
print(usedRoute)
stop = timeit.default_timer()
print(round(stop - start, 3))
# useSlowMod = False
#######################
## rerouting in slow mode
## calc all available routes to the best route (optimize the path)
#######################
if useSlowMod == True:
systemList = []
for system in bestRoute:
if myStartSystem != system.systemID:
systemList.append([ system.systemID, system.starDist ])
print("slow calc with %d rare systems" % len(systemList))
startSystem = elite.route(mydb, None, maxDeep, maxJumpDistance, maxHops)
startSystem._availableSystemList = systemList
startSystem.systemID = mydb.getSystemIDbyName(myStartSystem) # start system
startSystem.initSystem = startSystem
startSystem.calcAllRoutesFromSystem(True) # slow mode calc
# maxDeep = startSystem.calcRoutingDeep()
# route = startSystem.getBestRoute(maxDeep)
(bestRoute, bestSellCount, bestRating) = getBestRouteFromResult(startSystem, False, maxDeep) # recursive run
return (bestRoute, bestSellCount, bestRating)
wonRating = None
wonRoute = None
for calcDeep in range(minDeep, maxDeep + 1):
(route, bestSellCount, bestRating) = getBestRouteFromResult(startSystem, useSlowMod, maxDeep=calcDeep)
rating = bestRating * ((1 + bestSellCount) * 1.2) + route[len(route) - 1].deep * 10
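# Lower is better: routes that leave items unsold are penalised
# multiplicatively via (1 + bestSellCount) * 1.2, and each extra level of
# routing depth adds a flat 10 to the score.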
print("----------->", calcDeep, rating, bestSellCount, bestRating)
if not wonRating or wonRating > rating:
wonRating = rating
wonRoute = route
print("won ----------->", wonRating)
route = wonRoute
################
# print my result list
################
for system in route: # reset sell status
system._sellDone = None
system._raresInSystem = None
totalDist = 0
for sysCount, system in enumerate(route):
if system._raresInSystem == None: system._raresInSystem = myrares.getRaresInSystem(system.systemID)
if system._dist :
diststr = "%dly" % system._dist
totalDist += system._dist
else: diststr = "Start"
print("%d. %s -> %s" % (sysCount + 1, diststr, mydb.getSystemname(system.systemID)))
if sysCount + 1 < len(route): # next system for commodity deals
nextSystem = route[sysCount + 1]
else:
nextSystem = route[0]
commodTrades = []
if system._raresInSystem:
print("\t__Buy__")
for item in system._raresInSystem:
print("\t\tRareitem: %s From Station: %s dist:%dls" % (item["Name"], mydb.getStationname(item["StationID"]) , mydb.getStarDistFromStation(item["StationID"])))
nextRareStations = myrares.getRaresInSystem(nextSystem.systemID)
if not nextRareStations: # endloop fallback
commodTrades.append([item["StationID"], ""])
else:
for raresStation in nextRareStations:
if [item["StationID"], raresStation["StationID"]] in commodTrades:
pass
else:
commodTrades.append([item["StationID"], raresStation["StationID"]])
else:
print("\tNo rares to buy")
nextRareStations = myrares.getRaresInSystem(nextSystem.systemID)
if nextRareStations: # endloop fallback
for raresStation in nextRareStations:
commodTrades.append(["", raresStation["StationID"]])
if commodTrades:
print("\t__Buy__")
for stationForCommod in commodTrades:
if not stationForCommod[0]: # no 'from' station available (this system has no rares to buy): fall back and check deals from all stations in the system
commodityDeals = []
for station in mydb.getStationsFromSystem(system.systemID):
newcommodityDeals = mydb.getDealsFromTo(station[0], stationForCommod[1])
if newcommodityDeals: commodityDeals += newcommodityDeals
elif not stationForCommod[1]: # no 'to' station available (the next system has no rares): fall back and check deals to all stations in the next system
commodityDeals = []
for station in mydb.getStationsFromSystem(nextSystem.systemID):
newcommodityDeals = mydb.getDealsFromTo(stationForCommod[0], station[0])
if newcommodityDeals: commodityDeals += newcommodityDeals
else:
commodityDeals = mydb.getDealsFromTo(stationForCommod[0], stationForCommod[1])
commodityDeals = sorted(commodityDeals, key=lambda items: items["profit"], reverse=True)
if commodityDeals == False:
print("\t\t\tNo commodity data avalibel")
elif not commodityDeals:
print("\t\t\tNo com deals")
else:
for count, commodItem in enumerate(commodityDeals):
print("\t\t\tCom: %s from: %s %dcr to: %s %dcr profit:%d" % (commodItem["itemName"], commodItem["fromStation"], commodItem["stationSell"], commodItem["toStation"], commodItem["stationBuy"], commodItem["profit"]))
if count >= 2: break # only show the first 3
# # sell items calc
sellItems(system, route, False)
# back to start
dist = mydb.getDistanceFromTo(system.systemID, route[0].systemID)
totalDist += dist
print("%d. back to %s -> %dly" % (sysCount + 2, mydb.getSystemname(route[0].systemID) , round(dist)))
sellItems(route[0], route, False)
# # remaining items
myTitle = True
for oldsystem in route:
if oldsystem._sellDone:
continue
if oldsystem._raresInSystem:
# oldsystem._sellDone = True
if myTitle:
print("\t__Sell Rest__")
myTitle = False
dist = mydb.getDistanceFromTo(oldsystem.systemID, route[0].systemID)
for item in oldsystem._raresInSystem:
sellList = []
for loopsystem in route:
dist2 = mydb.getDistanceFromTo(oldsystem.systemID, loopsystem.systemID)
if dist2 >= sellDist:
sellList.append("%s -> %dly" % ( mydb.getSystemname(loopsystem.systemID) , dist2))
print("\t\tItem: %s buydist:%d or sell in: %s" % (item["Name"], dist, ", ".join(sellList)))
print("total loop len %dly" % totalDist)
stop = timeit.default_timer()
print(round(stop - start, 3))
print(sys.version)
|
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from eventlet import timeout as etimeout
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import units
import unittest2
from nova import exception
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.hyperv import test_base
from nova.virt import hardware
from nova.virt.hyperv import constants
from nova.virt.hyperv import ioutils
from nova.virt.hyperv import vmops
from nova.virt.hyperv import vmutils
CONF = cfg.CONF
class VMOpsTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V VMOps class."""
_FAKE_TIMEOUT = 2
FAKE_SIZE = 10
FAKE_DIR = 'fake_dir'
FAKE_ROOT_PATH = 'C:\\path\\to\\fake.%s'
FAKE_CONFIG_DRIVE_ISO = 'configdrive.iso'
FAKE_CONFIG_DRIVE_VHD = 'configdrive.vhd'
FAKE_UUID = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'
FAKE_LOG = 'fake_log'
ISO9660 = 'iso9660'
_FAKE_CONFIGDRIVE_PATH = 'C:/fake_instance_dir/configdrive.vhd'
def setUp(self):
super(VMOpsTestCase, self).setUp()
self.context = 'fake-context'
self._vmops = vmops.VMOps()
self._vmops._vmutils = mock.MagicMock()
self._vmops._vhdutils = mock.MagicMock()
self._vmops._pathutils = mock.MagicMock()
self._vmops._hostutils = mock.MagicMock()
@mock.patch('nova.virt.hyperv.vmops.importutils.import_object')
def test_load_vif_driver_class(self, mock_import_object):
self._vmops._load_vif_driver_class()
mock_import_object.assert_called_once_with(
self._vmops._vif_driver_class_map[CONF.network_api_class])
self.assertEqual(self._vmops._vif_driver,
mock_import_object.return_value)
@mock.patch('nova.virt.hyperv.vmops.importutils.import_object')
def test_load_vif_driver_class_error(self, mock_import_object):
mock_import_object.side_effect = KeyError
self.assertRaises(TypeError, self._vmops._load_vif_driver_class)
def test_list_instances(self):
mock_instance = mock.MagicMock()
self._vmops._vmutils.list_instances.return_value = [mock_instance]
response = self._vmops.list_instances()
self._vmops._vmutils.list_instances.assert_called_once_with()
self.assertEqual(response, [mock_instance])
def _test_get_info(self, vm_exists):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_info = mock.MagicMock(spec_set=dict)
fake_info = {'EnabledState': 2,
'MemoryUsage': mock.sentinel.FAKE_MEM_KB,
'NumberOfProcessors': mock.sentinel.FAKE_NUM_CPU,
'UpTime': mock.sentinel.FAKE_CPU_NS}
def getitem(key):
return fake_info[key]
mock_info.__getitem__.side_effect = getitem
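# mock_info now behaves like a read-only dict backed by fake_info, so the
# code under test can index the summary info as it would a real result.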
expected = hardware.InstanceInfo(state=constants.HYPERV_POWER_STATE[2],
max_mem_kb=mock.sentinel.FAKE_MEM_KB,
mem_kb=mock.sentinel.FAKE_MEM_KB,
num_cpu=mock.sentinel.FAKE_NUM_CPU,
cpu_time_ns=mock.sentinel.FAKE_CPU_NS)
self._vmops._vmutils.vm_exists.return_value = vm_exists
self._vmops._vmutils.get_vm_summary_info.return_value = mock_info
if not vm_exists:
self.assertRaises(exception.InstanceNotFound,
self._vmops.get_info, mock_instance)
else:
response = self._vmops.get_info(mock_instance)
self._vmops._vmutils.vm_exists.assert_called_once_with(
mock_instance.name)
self._vmops._vmutils.get_vm_summary_info.assert_called_once_with(
mock_instance.name)
self.assertEqual(response, expected)
def test_get_info(self):
self._test_get_info(vm_exists=True)
def test_get_info_exception(self):
self._test_get_info(vm_exists=False)
def _prepare_create_root_vhd_mocks(self, use_cow_images, vhd_format,
vhd_size):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_instance.root_gb = self.FAKE_SIZE
self.flags(use_cow_images=use_cow_images)
self._vmops._vhdutils.get_vhd_info.return_value = {'MaxInternalSize':
vhd_size * units.Gi}
self._vmops._vhdutils.get_vhd_format.return_value = vhd_format
root_vhd_internal_size = mock_instance.root_gb * units.Gi
get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size
get_size.return_value = root_vhd_internal_size
self._vmops._pathutils.exists.return_value = True
return mock_instance
@mock.patch('nova.virt.hyperv.imagecache.ImageCache.get_cached_image')
def _test_create_root_vhd_exception(self, mock_get_cached_image,
vhd_format):
mock_instance = self._prepare_create_root_vhd_mocks(
use_cow_images=False, vhd_format=vhd_format,
vhd_size=(self.FAKE_SIZE + 1))
fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format
mock_get_cached_image.return_value = fake_vhd_path
fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value
self.assertRaises(vmutils.VHDResizeException,
self._vmops._create_root_vhd, self.context,
mock_instance)
self.assertFalse(self._vmops._vhdutils.resize_vhd.called)
self._vmops._pathutils.exists.assert_called_once_with(
fake_root_path)
self._vmops._pathutils.remove.assert_called_once_with(
fake_root_path)
@mock.patch('nova.virt.hyperv.imagecache.ImageCache.get_cached_image')
def _test_create_root_vhd_qcow(self, mock_get_cached_image, vhd_format):
mock_instance = self._prepare_create_root_vhd_mocks(
use_cow_images=True, vhd_format=vhd_format,
vhd_size=(self.FAKE_SIZE - 1))
fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format
mock_get_cached_image.return_value = fake_vhd_path
fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value
root_vhd_internal_size = mock_instance.root_gb * units.Gi
get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size
response = self._vmops._create_root_vhd(context=self.context,
instance=mock_instance)
self.assertEqual(fake_root_path, response)
self._vmops._pathutils.get_root_vhd_path.assert_called_with(
mock_instance.name, vhd_format)
differencing_vhd = self._vmops._vhdutils.create_differencing_vhd
differencing_vhd.assert_called_with(fake_root_path, fake_vhd_path)
self._vmops._vhdutils.get_vhd_info.assert_called_once_with(
fake_vhd_path)
if vhd_format is constants.DISK_FORMAT_VHD:
self.assertFalse(get_size.called)
self.assertFalse(self._vmops._vhdutils.resize_vhd.called)
else:
get_size.assert_called_once_with(fake_vhd_path,
root_vhd_internal_size)
self._vmops._vhdutils.resize_vhd.assert_called_once_with(
fake_root_path, root_vhd_internal_size, is_file_max_size=False)
@mock.patch('nova.virt.hyperv.imagecache.ImageCache.get_cached_image')
def _test_create_root_vhd(self, mock_get_cached_image, vhd_format):
mock_instance = self._prepare_create_root_vhd_mocks(
use_cow_images=False, vhd_format=vhd_format,
vhd_size=(self.FAKE_SIZE - 1))
fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format
mock_get_cached_image.return_value = fake_vhd_path
fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value
root_vhd_internal_size = mock_instance.root_gb * units.Gi
get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size
response = self._vmops._create_root_vhd(context=self.context,
instance=mock_instance)
self.assertEqual(fake_root_path, response)
self._vmops._pathutils.get_root_vhd_path.assert_called_with(
mock_instance.name, vhd_format)
self._vmops._pathutils.copyfile.assert_called_once_with(
fake_vhd_path, fake_root_path)
get_size.assert_called_once_with(fake_vhd_path, root_vhd_internal_size)
self._vmops._vhdutils.resize_vhd.assert_called_once_with(
fake_root_path, root_vhd_internal_size, is_file_max_size=False)
def test_create_root_vhd(self):
self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHD)
def test_create_root_vhdx(self):
self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHDX)
def test_create_root_vhd_use_cow_images_true(self):
self._test_create_root_vhd_qcow(vhd_format=constants.DISK_FORMAT_VHD)
def test_create_root_vhdx_use_cow_images_true(self):
self._test_create_root_vhd_qcow(vhd_format=constants.DISK_FORMAT_VHDX)
def test_create_root_vhdx_size_less_than_internal(self):
self._test_create_root_vhd_exception(
vhd_format=constants.DISK_FORMAT_VHD)
def test_is_resize_needed_exception(self):
inst = mock.MagicMock()
self.assertRaises(
vmutils.VHDResizeException, self._vmops._is_resize_needed,
mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE - 1, inst)
def test_is_resize_needed_true(self):
inst = mock.MagicMock()
self.assertTrue(self._vmops._is_resize_needed(
mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE + 1, inst))
def test_is_resize_needed_false(self):
inst = mock.MagicMock()
self.assertFalse(self._vmops._is_resize_needed(
mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE, inst))
def test_create_ephemeral_vhd(self):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_instance.ephemeral_gb = self.FAKE_SIZE
best_supported = self._vmops._vhdutils.get_best_supported_vhd_format
best_supported.return_value = mock.sentinel.FAKE_FORMAT
self._vmops._pathutils.get_ephemeral_vhd_path.return_value = (
mock.sentinel.FAKE_PATH)
response = self._vmops.create_ephemeral_vhd(instance=mock_instance)
self._vmops._pathutils.get_ephemeral_vhd_path.assert_called_with(
mock_instance.name, mock.sentinel.FAKE_FORMAT)
self._vmops._vhdutils.create_dynamic_vhd.assert_called_with(
mock.sentinel.FAKE_PATH, mock_instance.ephemeral_gb * units.Gi,
mock.sentinel.FAKE_FORMAT)
self.assertEqual(mock.sentinel.FAKE_PATH, response)
@mock.patch('nova.virt.hyperv.vmops.VMOps.destroy')
@mock.patch('nova.virt.hyperv.vmops.VMOps.power_on')
@mock.patch('nova.virt.hyperv.vmops.VMOps.attach_config_drive')
@mock.patch('nova.virt.hyperv.vmops.VMOps._create_config_drive')
@mock.patch('nova.virt.configdrive.required_by')
@mock.patch('nova.virt.hyperv.vmops.VMOps.create_instance')
@mock.patch('nova.virt.hyperv.vmops.VMOps.get_image_vm_generation')
@mock.patch('nova.virt.hyperv.vmops.VMOps.create_ephemeral_vhd')
@mock.patch('nova.virt.hyperv.vmops.VMOps._create_root_vhd')
@mock.patch('nova.virt.hyperv.volumeops.VolumeOps.'
'ebs_root_in_block_devices')
@mock.patch('nova.virt.hyperv.vmops.VMOps._delete_disk_files')
def _test_spawn(self, mock_delete_disk_files,
mock_ebs_root_in_block_devices, mock_create_root_vhd,
mock_create_ephemeral_vhd, mock_get_image_vm_gen,
mock_create_instance, mock_configdrive_required,
mock_create_config_drive, mock_attach_config_drive,
mock_power_on, mock_destroy, exists, boot_from_volume,
configdrive_required, fail):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_image_meta = mock.MagicMock()
fake_root_path = mock_create_root_vhd.return_value
fake_root_path = None if boot_from_volume else fake_root_path
fake_ephemeral_path = mock_create_ephemeral_vhd.return_value
fake_vm_gen = mock_get_image_vm_gen.return_value
fake_config_drive_path = mock_create_config_drive.return_value
self._vmops._vmutils.vm_exists.return_value = exists
mock_ebs_root_in_block_devices.return_value = boot_from_volume
mock_create_root_vhd.return_value = fake_root_path
mock_configdrive_required.return_value = configdrive_required
mock_create_instance.side_effect = fail
if exists:
self.assertRaises(exception.InstanceExists, self._vmops.spawn,
self.context, mock_instance, mock_image_meta,
[mock.sentinel.FILE], mock.sentinel.PASSWORD,
mock.sentinel.INFO, mock.sentinel.DEV_INFO)
elif fail is vmutils.HyperVException:
self.assertRaises(vmutils.HyperVException, self._vmops.spawn,
self.context, mock_instance, mock_image_meta,
[mock.sentinel.FILE], mock.sentinel.PASSWORD,
mock.sentinel.INFO, mock.sentinel.DEV_INFO)
mock_destroy.assert_called_once_with(mock_instance)
else:
self._vmops.spawn(self.context, mock_instance, mock_image_meta,
[mock.sentinel.FILE], mock.sentinel.PASSWORD,
mock.sentinel.INFO, mock.sentinel.DEV_INFO)
self._vmops._vmutils.vm_exists.assert_called_once_with(
mock_instance.name)
mock_delete_disk_files.assert_called_once_with(
mock_instance.name)
mock_ebs_root_in_block_devices.assert_called_once_with(
mock.sentinel.DEV_INFO)
if not boot_from_volume:
mock_create_root_vhd.assert_called_once_with(self.context,
mock_instance)
mock_create_ephemeral_vhd.assert_called_once_with(mock_instance)
mock_get_image_vm_gen.assert_called_once_with(fake_root_path,
mock_image_meta)
mock_create_instance.assert_called_once_with(
mock_instance, mock.sentinel.INFO, mock.sentinel.DEV_INFO,
fake_root_path, fake_ephemeral_path, fake_vm_gen)
mock_configdrive_required.assert_called_once_with(mock_instance)
if configdrive_required:
mock_create_config_drive.assert_called_once_with(
mock_instance, [mock.sentinel.FILE],
mock.sentinel.PASSWORD,
mock.sentinel.INFO)
mock_attach_config_drive.assert_called_once_with(
mock_instance, fake_config_drive_path, fake_vm_gen)
mock_power_on.assert_called_once_with(mock_instance)
def test_spawn(self):
self._test_spawn(exists=False, boot_from_volume=False,
configdrive_required=True, fail=None)
def test_spawn_instance_exists(self):
self._test_spawn(exists=True, boot_from_volume=False,
configdrive_required=True, fail=None)
def test_spawn_create_instance_exception(self):
self._test_spawn(exists=False, boot_from_volume=False,
configdrive_required=True,
fail=vmutils.HyperVException)
def test_spawn_not_required(self):
self._test_spawn(exists=False, boot_from_volume=False,
configdrive_required=False, fail=None)
def test_spawn_root_in_block(self):
self._test_spawn(exists=False, boot_from_volume=True,
configdrive_required=False, fail=None)
def test_spawn_no_admin_permissions(self):
self._vmops._vmutils.check_admin_permissions.side_effect = (
vmutils.HyperVException)
self.assertRaises(vmutils.HyperVException,
self._vmops.spawn,
self.context, mock.DEFAULT, mock.DEFAULT,
[mock.sentinel.FILE], mock.sentinel.PASSWORD,
mock.sentinel.INFO, mock.sentinel.DEV_INFO)
@mock.patch('nova.virt.hyperv.volumeops.VolumeOps'
'.attach_volumes')
@mock.patch.object(vmops.VMOps, '_attach_drive')
def _test_create_instance(self, mock_attach_drive, mock_attach_volumes,
fake_root_path, fake_ephemeral_path,
enable_instance_metrics,
vm_gen=constants.VM_GEN_1):
mock_vif_driver = mock.MagicMock()
self._vmops._vif_driver = mock_vif_driver
self.flags(enable_instance_metrics_collection=enable_instance_metrics,
group='hyperv')
fake_network_info = {'id': mock.sentinel.ID,
'address': mock.sentinel.ADDRESS}
mock_instance = fake_instance.fake_instance_obj(self.context)
instance_path = os.path.join(CONF.instances_path, mock_instance.name)
self._vmops.create_instance(instance=mock_instance,
network_info=[fake_network_info],
block_device_info=mock.sentinel.DEV_INFO,
root_vhd_path=fake_root_path,
eph_vhd_path=fake_ephemeral_path,
vm_gen=vm_gen)
self._vmops._vmutils.create_vm.assert_called_once_with(
mock_instance.name, mock_instance.memory_mb,
mock_instance.vcpus, CONF.hyperv.limit_cpu_features,
CONF.hyperv.dynamic_memory_ratio, vm_gen, instance_path,
[mock_instance.uuid])
expected = []
ctrl_type = vmops.VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]
ctrl_disk_addr = 0
if fake_root_path:
expected.append(mock.call(mock_instance.name, fake_root_path,
0, ctrl_disk_addr, ctrl_type,
constants.DISK))
ctrl_disk_addr += 1
if fake_ephemeral_path:
expected.append(mock.call(mock_instance.name,
fake_ephemeral_path, 0, ctrl_disk_addr,
ctrl_type, constants.DISK))
        mock_attach_drive.assert_has_calls(expected)
self._vmops._vmutils.create_scsi_controller.assert_called_once_with(
mock_instance.name)
ebs_root = vm_gen is not constants.VM_GEN_2 and fake_root_path is None
mock_attach_volumes.assert_called_once_with(mock.sentinel.DEV_INFO,
mock_instance.name,
ebs_root)
self._vmops._vmutils.create_nic.assert_called_once_with(
mock_instance.name, mock.sentinel.ID, mock.sentinel.ADDRESS)
mock_vif_driver.plug.assert_called_once_with(mock_instance,
fake_network_info)
mock_enable = self._vmops._vmutils.enable_vm_metrics_collection
if enable_instance_metrics:
mock_enable.assert_called_once_with(mock_instance.name)
def test_create_instance(self):
fake_ephemeral_path = mock.sentinel.FAKE_EPHEMERAL_PATH
self._test_create_instance(fake_root_path=mock.sentinel.FAKE_ROOT_PATH,
fake_ephemeral_path=fake_ephemeral_path,
enable_instance_metrics=True)
def test_create_instance_no_root_path(self):
fake_ephemeral_path = mock.sentinel.FAKE_EPHEMERAL_PATH
self._test_create_instance(fake_root_path=None,
fake_ephemeral_path=fake_ephemeral_path,
enable_instance_metrics=True)
def test_create_instance_no_ephemeral_path(self):
self._test_create_instance(fake_root_path=mock.sentinel.FAKE_ROOT_PATH,
fake_ephemeral_path=None,
enable_instance_metrics=True)
def test_create_instance_no_path(self):
self._test_create_instance(fake_root_path=None,
fake_ephemeral_path=None,
enable_instance_metrics=False)
def test_create_instance_enable_instance_metrics_false(self):
fake_ephemeral_path = mock.sentinel.FAKE_EPHEMERAL_PATH
self._test_create_instance(fake_root_path=mock.sentinel.FAKE_ROOT_PATH,
fake_ephemeral_path=fake_ephemeral_path,
enable_instance_metrics=False)
def test_create_instance_gen2(self):
self._test_create_instance(fake_root_path=None,
fake_ephemeral_path=None,
enable_instance_metrics=False,
vm_gen=constants.VM_GEN_2)
def test_attach_drive_vm_to_scsi(self):
self._vmops._attach_drive(
mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH,
mock.sentinel.FAKE_DRIVE_ADDR, mock.sentinel.FAKE_CTRL_DISK_ADDR,
constants.CTRL_TYPE_SCSI)
self._vmops._vmutils.attach_scsi_drive.assert_called_once_with(
mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH,
constants.DISK)
def test_attach_drive_vm_to_ide(self):
self._vmops._attach_drive(
mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH,
mock.sentinel.FAKE_DRIVE_ADDR, mock.sentinel.FAKE_CTRL_DISK_ADDR,
constants.CTRL_TYPE_IDE)
self._vmops._vmutils.attach_ide_drive.assert_called_once_with(
mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH,
mock.sentinel.FAKE_DRIVE_ADDR, mock.sentinel.FAKE_CTRL_DISK_ADDR,
constants.DISK)
def _check_get_image_vm_gen_except(self, image_prop):
image_meta = {"properties": {constants.IMAGE_PROP_VM_GEN: image_prop}}
self._vmops._hostutils.get_supported_vm_types.return_value = [
constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2]
self.assertRaises(vmutils.HyperVException,
self._vmops.get_image_vm_generation,
mock.sentinel.FAKE_PATH,
image_meta)
def test_get_image_vm_generation_default(self):
image_meta = {"properties": {}}
self._vmops._hostutils.get_default_vm_generation.return_value = (
constants.IMAGE_PROP_VM_GEN_1)
self._vmops._hostutils.get_supported_vm_types.return_value = [
constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2]
response = self._vmops.get_image_vm_generation(mock.sentinel.FAKE_PATH,
image_meta)
self.assertEqual(constants.VM_GEN_1, response)
def test_get_image_vm_generation_gen2(self):
image_meta = {"properties": {
constants.IMAGE_PROP_VM_GEN: constants.IMAGE_PROP_VM_GEN_2}}
self._vmops._hostutils.get_supported_vm_types.return_value = [
constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2]
self._vmops._vhdutils.get_vhd_format.return_value = (
constants.DISK_FORMAT_VHDX)
response = self._vmops.get_image_vm_generation(mock.sentinel.FAKE_PATH,
image_meta)
self.assertEqual(constants.VM_GEN_2, response)
def test_get_image_vm_generation_bad_prop(self):
self._check_get_image_vm_gen_except(mock.sentinel.FAKE_IMAGE_PROP)
def test_get_image_vm_generation_not_vhdx(self):
self._vmops._vhdutils.get_vhd_format.return_value = (
constants.DISK_FORMAT_VHD)
self._check_get_image_vm_gen_except(constants.IMAGE_PROP_VM_GEN_2)
@mock.patch('nova.api.metadata.base.InstanceMetadata')
@mock.patch('nova.virt.configdrive.ConfigDriveBuilder')
@mock.patch('nova.utils.execute')
def _test_create_config_drive(self, mock_execute, mock_ConfigDriveBuilder,
mock_InstanceMetadata, config_drive_format,
config_drive_cdrom, side_effect):
mock_instance = fake_instance.fake_instance_obj(self.context)
self.flags(config_drive_format=config_drive_format)
self.flags(config_drive_cdrom=config_drive_cdrom, group='hyperv')
self.flags(config_drive_inject_password=True, group='hyperv')
self._vmops._pathutils.get_instance_dir.return_value = (
self.FAKE_DIR)
mock_ConfigDriveBuilder().__enter__().make_drive.side_effect = [
side_effect]
if config_drive_format != self.ISO9660:
self.assertRaises(vmutils.UnsupportedConfigDriveFormatException,
self._vmops._create_config_drive,
mock_instance, [mock.sentinel.FILE],
mock.sentinel.PASSWORD,
mock.sentinel.NET_INFO)
elif side_effect is processutils.ProcessExecutionError:
self.assertRaises(processutils.ProcessExecutionError,
self._vmops._create_config_drive,
mock_instance, [mock.sentinel.FILE],
mock.sentinel.PASSWORD,
mock.sentinel.NET_INFO)
else:
path = self._vmops._create_config_drive(mock_instance,
[mock.sentinel.FILE],
mock.sentinel.PASSWORD,
mock.sentinel.NET_INFO)
mock_InstanceMetadata.assert_called_once_with(
mock_instance, content=[mock.sentinel.FILE],
extra_md={'admin_pass': mock.sentinel.PASSWORD},
network_info=mock.sentinel.NET_INFO)
self._vmops._pathutils.get_instance_dir.assert_called_once_with(
mock_instance.name)
mock_ConfigDriveBuilder.assert_called_with(
instance_md=mock_InstanceMetadata())
mock_make_drive = mock_ConfigDriveBuilder().__enter__().make_drive
path_iso = os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_ISO)
path_vhd = os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_VHD)
mock_make_drive.assert_called_once_with(path_iso)
if not CONF.hyperv.config_drive_cdrom:
expected = path_vhd
mock_execute.assert_called_once_with(
CONF.hyperv.qemu_img_cmd,
'convert', '-f', 'raw', '-O', 'vpc',
path_iso, path_vhd, attempts=1)
self._vmops._pathutils.remove.assert_called_once_with(
os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_ISO))
else:
expected = path_iso
self.assertEqual(expected, path)
def test_create_config_drive_cdrom(self):
self._test_create_config_drive(config_drive_format=self.ISO9660,
config_drive_cdrom=True,
side_effect=None)
def test_create_config_drive_vhd(self):
self._test_create_config_drive(config_drive_format=self.ISO9660,
config_drive_cdrom=False,
side_effect=None)
def test_create_config_drive_other_drive_format(self):
self._test_create_config_drive(config_drive_format=mock.sentinel.OTHER,
config_drive_cdrom=False,
side_effect=None)
def test_create_config_drive_execution_error(self):
self._test_create_config_drive(
config_drive_format=self.ISO9660,
config_drive_cdrom=False,
side_effect=processutils.ProcessExecutionError)
def test_attach_config_drive_exception(self):
instance = fake_instance.fake_instance_obj(self.context)
self.assertRaises(exception.InvalidDiskFormat,
self._vmops.attach_config_drive,
instance, 'C:/fake_instance_dir/configdrive.xxx',
constants.VM_GEN_1)
@mock.patch.object(vmops.VMOps, '_attach_drive')
def test_attach_config_drive(self, mock_attach_drive):
instance = fake_instance.fake_instance_obj(self.context)
self._vmops.attach_config_drive(instance,
self._FAKE_CONFIGDRIVE_PATH,
constants.VM_GEN_1)
mock_attach_drive.assert_called_once_with(
instance.name, self._FAKE_CONFIGDRIVE_PATH,
1, 0, constants.CTRL_TYPE_IDE, constants.DISK)
@mock.patch.object(vmops.VMOps, '_attach_drive')
def test_attach_config_drive_gen2(self, mock_attach_drive):
instance = fake_instance.fake_instance_obj(self.context)
self._vmops.attach_config_drive(instance,
self._FAKE_CONFIGDRIVE_PATH,
constants.VM_GEN_2)
mock_attach_drive.assert_called_once_with(
instance.name, self._FAKE_CONFIGDRIVE_PATH,
1, 0, constants.CTRL_TYPE_SCSI, constants.DISK)
def test_delete_disk_files(self):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops._delete_disk_files(mock_instance.name)
self._vmops._pathutils.get_instance_dir.assert_called_once_with(
mock_instance.name, create_dir=False, remove_dir=True)
@mock.patch('nova.virt.hyperv.volumeops.VolumeOps.disconnect_volumes')
@mock.patch('nova.virt.hyperv.vmops.VMOps._delete_disk_files')
@mock.patch('nova.virt.hyperv.vmops.VMOps.power_off')
def test_destroy(self, mock_power_off, mock_delete_disk_files,
mock_disconnect_volumes):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops._vmutils.vm_exists.return_value = True
self._vmops.destroy(instance=mock_instance,
block_device_info=mock.sentinel.FAKE_BD_INFO)
self._vmops._vmutils.vm_exists.assert_called_with(
mock_instance.name)
mock_power_off.assert_called_once_with(mock_instance)
self._vmops._vmutils.destroy_vm.assert_called_once_with(
mock_instance.name)
mock_disconnect_volumes.assert_called_once_with(
mock.sentinel.FAKE_BD_INFO)
mock_delete_disk_files.assert_called_once_with(
mock_instance.name)
def test_destroy_inexistent_instance(self):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops._vmutils.vm_exists.return_value = False
self._vmops.destroy(instance=mock_instance)
self.assertFalse(self._vmops._vmutils.destroy_vm.called)
@mock.patch('nova.virt.hyperv.vmops.VMOps.power_off')
def test_destroy_exception(self, mock_power_off):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops._vmutils.destroy_vm.side_effect = vmutils.HyperVException
self._vmops._vmutils.vm_exists.return_value = True
self.assertRaises(vmutils.HyperVException,
self._vmops.destroy, mock_instance)
def test_reboot_hard(self):
self._test_reboot(vmops.REBOOT_TYPE_HARD,
constants.HYPERV_VM_STATE_REBOOT)
@mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
def test_reboot_soft(self, mock_soft_shutdown):
mock_soft_shutdown.return_value = True
self._test_reboot(vmops.REBOOT_TYPE_SOFT,
constants.HYPERV_VM_STATE_ENABLED)
@mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
def test_reboot_soft_failed(self, mock_soft_shutdown):
mock_soft_shutdown.return_value = False
self._test_reboot(vmops.REBOOT_TYPE_SOFT,
constants.HYPERV_VM_STATE_REBOOT)
@mock.patch("nova.virt.hyperv.vmops.VMOps.power_on")
@mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
def test_reboot_soft_exception(self, mock_soft_shutdown, mock_power_on):
mock_soft_shutdown.return_value = True
mock_power_on.side_effect = vmutils.HyperVException("Expected failure")
instance = fake_instance.fake_instance_obj(self.context)
self.assertRaises(vmutils.HyperVException, self._vmops.reboot,
instance, {}, vmops.REBOOT_TYPE_SOFT)
mock_soft_shutdown.assert_called_once_with(instance)
mock_power_on.assert_called_once_with(instance)
def _test_reboot(self, reboot_type, vm_state):
instance = fake_instance.fake_instance_obj(self.context)
with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state:
self._vmops.reboot(instance, {}, reboot_type)
mock_set_state.assert_called_once_with(instance, vm_state)
@mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
def test_soft_shutdown(self, mock_wait_for_power_off):
instance = fake_instance.fake_instance_obj(self.context)
mock_wait_for_power_off.return_value = True
result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT)
mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm
mock_shutdown_vm.assert_called_once_with(instance.name)
mock_wait_for_power_off.assert_called_once_with(
instance.name, self._FAKE_TIMEOUT)
self.assertTrue(result)
@mock.patch("time.sleep")
def test_soft_shutdown_failed(self, mock_sleep):
instance = fake_instance.fake_instance_obj(self.context)
mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm
mock_shutdown_vm.side_effect = vmutils.HyperVException(
"Expected failure.")
result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT)
mock_shutdown_vm.assert_called_once_with(instance.name)
self.assertFalse(result)
@mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
def test_soft_shutdown_wait(self, mock_wait_for_power_off):
instance = fake_instance.fake_instance_obj(self.context)
mock_wait_for_power_off.side_effect = [False, True]
result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1)
calls = [mock.call(instance.name, 1),
mock.call(instance.name, self._FAKE_TIMEOUT - 1)]
mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm
mock_shutdown_vm.assert_called_with(instance.name)
mock_wait_for_power_off.assert_has_calls(calls)
self.assertTrue(result)
@mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
def test_soft_shutdown_wait_timeout(self, mock_wait_for_power_off):
instance = fake_instance.fake_instance_obj(self.context)
mock_wait_for_power_off.return_value = False
result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1.5)
calls = [mock.call(instance.name, 1.5),
mock.call(instance.name, self._FAKE_TIMEOUT - 1.5)]
mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm
mock_shutdown_vm.assert_called_with(instance.name)
mock_wait_for_power_off.assert_has_calls(calls)
self.assertFalse(result)
@mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state')
def test_pause(self, mock_set_vm_state):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops.pause(instance=mock_instance)
mock_set_vm_state.assert_called_once_with(
mock_instance, constants.HYPERV_VM_STATE_PAUSED)
@mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state')
def test_unpause(self, mock_set_vm_state):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops.unpause(instance=mock_instance)
mock_set_vm_state.assert_called_once_with(
mock_instance, constants.HYPERV_VM_STATE_ENABLED)
@mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state')
def test_suspend(self, mock_set_vm_state):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops.suspend(instance=mock_instance)
mock_set_vm_state.assert_called_once_with(
mock_instance, constants.HYPERV_VM_STATE_SUSPENDED)
@mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state')
def test_resume(self, mock_set_vm_state):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops.resume(instance=mock_instance)
mock_set_vm_state.assert_called_once_with(
mock_instance, constants.HYPERV_VM_STATE_ENABLED)
def _test_power_off(self, timeout, set_state_expected=True):
instance = fake_instance.fake_instance_obj(self.context)
with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state:
self._vmops.power_off(instance, timeout)
if set_state_expected:
mock_set_state.assert_called_once_with(
instance, constants.HYPERV_VM_STATE_DISABLED)
def test_power_off_hard(self):
self._test_power_off(timeout=0)
@mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
def test_power_off_exception(self, mock_soft_shutdown):
mock_soft_shutdown.return_value = False
self._test_power_off(timeout=1)
@mock.patch("nova.virt.hyperv.vmops.VMOps._set_vm_state")
@mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
def test_power_off_soft(self, mock_soft_shutdown, mock_set_state):
instance = fake_instance.fake_instance_obj(self.context)
mock_soft_shutdown.return_value = True
self._vmops.power_off(instance, 1, 0)
mock_soft_shutdown.assert_called_once_with(
instance, 1, vmops.SHUTDOWN_TIME_INCREMENT)
self.assertFalse(mock_set_state.called)
@mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
def test_power_off_unexisting_instance(self, mock_soft_shutdown):
mock_soft_shutdown.side_effect = exception.NotFound
self._test_power_off(timeout=1, set_state_expected=False)
@mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state')
def test_power_on(self, mock_set_vm_state):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops.power_on(mock_instance)
mock_set_vm_state.assert_called_once_with(
mock_instance, constants.HYPERV_VM_STATE_ENABLED)
@mock.patch('nova.virt.hyperv.volumeops.VolumeOps'
'.fix_instance_volume_disk_paths')
@mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state')
def test_power_on_having_block_devices(self, mock_set_vm_state,
mock_fix_instance_vol_paths):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops.power_on(mock_instance, mock.sentinel.block_device_info)
mock_fix_instance_vol_paths.assert_called_once_with(
mock_instance.name, mock.sentinel.block_device_info)
mock_set_vm_state.assert_called_once_with(
mock_instance, constants.HYPERV_VM_STATE_ENABLED)
@mock.patch.object(vmops.VMOps, 'log_vm_serial_output')
@mock.patch.object(vmops.VMOps, '_delete_vm_console_log')
def _test_set_vm_state(self, mock_delete_vm_console_log,
mock_log_vm_output, state):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops._set_vm_state(mock_instance, state)
self._vmops._vmutils.set_vm_state.assert_called_once_with(
mock_instance.name, state)
if state in (constants.HYPERV_VM_STATE_DISABLED,
constants.HYPERV_VM_STATE_REBOOT):
mock_delete_vm_console_log.assert_called_once_with(mock_instance)
if state in (constants.HYPERV_VM_STATE_ENABLED,
constants.HYPERV_VM_STATE_REBOOT):
mock_log_vm_output.assert_called_once_with(mock_instance.name,
mock_instance.uuid)
def test_set_vm_state_disabled(self):
self._test_set_vm_state(state=constants.HYPERV_VM_STATE_DISABLED)
def test_set_vm_state_enabled(self):
self._test_set_vm_state(state=constants.HYPERV_VM_STATE_ENABLED)
def test_set_vm_state_reboot(self):
self._test_set_vm_state(state=constants.HYPERV_VM_STATE_REBOOT)
def test_set_vm_state_exception(self):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops._vmutils.set_vm_state.side_effect = vmutils.HyperVException
self.assertRaises(vmutils.HyperVException, self._vmops._set_vm_state,
mock_instance, mock.sentinel.STATE)
def test_get_vm_state(self):
summary_info = {'EnabledState': constants.HYPERV_VM_STATE_DISABLED}
with mock.patch.object(self._vmops._vmutils,
'get_vm_summary_info') as mock_get_summary_info:
mock_get_summary_info.return_value = summary_info
response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME)
self.assertEqual(response, constants.HYPERV_VM_STATE_DISABLED)
@mock.patch.object(vmops.VMOps, '_get_vm_state')
def test_wait_for_power_off_true(self, mock_get_state):
mock_get_state.return_value = constants.HYPERV_VM_STATE_DISABLED
result = self._vmops._wait_for_power_off(
mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT)
mock_get_state.assert_called_with(mock.sentinel.FAKE_VM_NAME)
self.assertTrue(result)
@mock.patch.object(vmops.etimeout, "with_timeout")
def test_wait_for_power_off_false(self, mock_with_timeout):
mock_with_timeout.side_effect = etimeout.Timeout()
result = self._vmops._wait_for_power_off(
mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT)
self.assertFalse(result)
@mock.patch.object(ioutils, 'IOThread')
def _test_log_vm_serial_output(self, mock_io_thread,
worker_running=False,
worker_exists=False):
self._vmops._pathutils.get_vm_console_log_paths.return_value = (
mock.sentinel.log_path, )
fake_instance_uuid = 'fake-uuid'
fake_existing_worker = mock.Mock()
fake_existing_worker.is_active.return_value = worker_running
fake_log_writers = {fake_instance_uuid: fake_existing_worker}
self._vmops._vm_log_writers = (
fake_log_writers if worker_exists else {})
self._vmops.log_vm_serial_output(mock.sentinel.instance_name,
fake_instance_uuid)
if not (worker_exists and worker_running):
expected_pipe_path = r'\\.\pipe\%s' % fake_instance_uuid
expected_current_worker = mock_io_thread.return_value
expected_current_worker.start.assert_called_once_with()
mock_io_thread.assert_called_once_with(
expected_pipe_path, mock.sentinel.log_path,
self._vmops._MAX_CONSOLE_LOG_FILE_SIZE)
else:
expected_current_worker = fake_existing_worker
self.assertEqual(expected_current_worker,
self._vmops._vm_log_writers[fake_instance_uuid])
def test_log_vm_serial_output_unexisting_worker(self):
self._test_log_vm_serial_output()
def test_log_vm_serial_output_worker_stopped(self):
self._test_log_vm_serial_output(worker_exists=True)
def test_log_vm_serial_output_worker_running(self):
self._test_log_vm_serial_output(worker_exists=True,
worker_running=True)
def test_copy_vm_console_logs(self):
fake_local_paths = (mock.sentinel.FAKE_PATH,
mock.sentinel.FAKE_PATH_ARCHIVED)
fake_remote_paths = (mock.sentinel.FAKE_REMOTE_PATH,
mock.sentinel.FAKE_REMOTE_PATH_ARCHIVED)
self._vmops._pathutils.get_vm_console_log_paths.side_effect = [
fake_local_paths, fake_remote_paths]
self._vmops._pathutils.exists.side_effect = [True, False]
self._vmops.copy_vm_console_logs(mock.sentinel.FAKE_VM_NAME,
mock.sentinel.FAKE_DEST)
calls = [mock.call(mock.sentinel.FAKE_VM_NAME),
mock.call(mock.sentinel.FAKE_VM_NAME,
remote_server=mock.sentinel.FAKE_DEST)]
self._vmops._pathutils.get_vm_console_log_paths.assert_has_calls(calls)
calls = [mock.call(mock.sentinel.FAKE_PATH),
mock.call(mock.sentinel.FAKE_PATH_ARCHIVED)]
self._vmops._pathutils.exists.assert_has_calls(calls)
self._vmops._pathutils.copy.assert_called_once_with(
mock.sentinel.FAKE_PATH, mock.sentinel.FAKE_REMOTE_PATH)
@mock.patch.object(vmops.ioutils, 'IOThread')
def test_log_vm_serial_output(self, fake_iothread):
self._vmops._pathutils.get_vm_console_log_paths.return_value = [
mock.sentinel.FAKE_PATH]
self._vmops.log_vm_serial_output(mock.sentinel.FAKE_VM_NAME,
self.FAKE_UUID)
pipe_path = r'\\.\pipe\%s' % self.FAKE_UUID
fake_iothread.assert_called_once_with(
pipe_path, mock.sentinel.FAKE_PATH,
self._vmops._MAX_CONSOLE_LOG_FILE_SIZE)
fake_iothread.return_value.start.assert_called_once_with()
@unittest2.skip('mock_open in 1.2 read only works once 1475661')
@mock.patch("os.path.exists")
def test_get_console_output(self, fake_path_exists):
mock_instance = fake_instance.fake_instance_obj(self.context)
fake_path_exists.return_value = True
self._vmops._pathutils.get_vm_console_log_paths.return_value = (
mock.sentinel.FAKE_PATH, mock.sentinel.FAKE_PATH_ARCHIVED)
with mock.patch('nova.virt.hyperv.vmops.open',
mock.mock_open(read_data=self.FAKE_LOG),
create=True):
instance_log = self._vmops.get_console_output(mock_instance)
# get_vm_console_log_paths returns 2 paths.
self.assertEqual(self.FAKE_LOG * 2, instance_log)
expected_calls = [mock.call(mock.sentinel.FAKE_PATH_ARCHIVED),
mock.call(mock.sentinel.FAKE_PATH)]
fake_path_exists.assert_has_calls(expected_calls, any_order=False)
@mock.patch("__builtin__.open")
@mock.patch("os.path.exists")
def test_get_console_output_exception(self, fake_path_exists, fake_open):
fake_vm = mock.MagicMock()
fake_open.side_effect = vmutils.HyperVException
fake_path_exists.return_value = True
self._vmops._pathutils.get_vm_console_log_paths.return_value = (
mock.sentinel.fake_console_log_path,
mock.sentinel.fake_console_log_archived)
with mock.patch('nova.virt.hyperv.vmops.open', fake_open, create=True):
self.assertRaises(vmutils.HyperVException,
self._vmops.get_console_output,
fake_vm)
@mock.patch.object(vmops.fileutils, 'delete_if_exists')
def test_delete_vm_console_log(self, mock_delete_if_exists):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops._pathutils.get_vm_console_log_paths.return_value = (
mock.sentinel.FAKE_PATH, )
mock_log_writer = mock.MagicMock()
self._vmops._vm_log_writers[mock_instance['uuid']] = mock_log_writer
self._vmops._delete_vm_console_log(mock_instance)
mock_log_writer.join.assert_called_once_with()
mock_delete_if_exists.assert_called_once_with(mock.sentinel.FAKE_PATH)
def test_create_vm_com_port_pipe(self):
mock_instance = fake_instance.fake_instance_obj(self.context)
pipe_path = r'\\.\pipe\%s' % mock_instance['uuid']
self._vmops._create_vm_com_port_pipe(mock_instance)
get_vm_serial_port = self._vmops._vmutils.get_vm_serial_port_connection
get_vm_serial_port.assert_called_once_with(mock_instance['name'],
update_connection=pipe_path)
@mock.patch.object(vmops.VMOps, "log_vm_serial_output")
@mock.patch("os.path.basename")
@mock.patch("os.path.exists")
def test_restart_vm_log_writers(self, mock_exists, mock_basename,
mock_log_vm_output):
self._vmops._vmutils.get_active_instances.return_value = [
mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_VM_NAME_OTHER]
mock_exists.side_effect = [True, False]
self._vmops.restart_vm_log_writers()
calls = [mock.call(mock.sentinel.FAKE_VM_NAME),
mock.call(mock.sentinel.FAKE_VM_NAME_OTHER)]
self._vmops._pathutils.get_instance_dir.assert_has_calls(calls)
get_vm_serial_port = self._vmops._vmutils.get_vm_serial_port_connection
get_vm_serial_port.assert_called_once_with(mock.sentinel.FAKE_VM_NAME)
mock_log_vm_output.assert_called_once_with(mock.sentinel.FAKE_VM_NAME,
mock_basename.return_value)
def test_list_instance_uuids(self):
fake_uuid = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'
with mock.patch.object(self._vmops._vmutils,
'list_instance_notes') as mock_list_notes:
mock_list_notes.return_value = [('fake_name', [fake_uuid])]
response = self._vmops.list_instance_uuids()
mock_list_notes.assert_called_once_with()
self.assertEqual(response, [fake_uuid])
def test_copy_vm_dvd_disks(self):
fake_paths = [mock.sentinel.FAKE_DVD_PATH1,
mock.sentinel.FAKE_DVD_PATH2]
mock_copy = self._vmops._pathutils.copyfile
mock_get_dvd_disk_paths = self._vmops._vmutils.get_vm_dvd_disk_paths
mock_get_dvd_disk_paths.return_value = fake_paths
self._vmops._pathutils.get_instance_dir.return_value = (
mock.sentinel.FAKE_DEST_PATH)
self._vmops.copy_vm_dvd_disks(mock.sentinel.FAKE_VM_NAME,
mock.sentinel.FAKE_DEST_HOST)
mock_get_dvd_disk_paths.assert_called_with(mock.sentinel.FAKE_VM_NAME)
self._vmops._pathutils.get_instance_dir.assert_called_once_with(
mock.sentinel.FAKE_VM_NAME,
remote_server=mock.sentinel.FAKE_DEST_HOST)
        mock_copy.assert_has_calls([mock.call(mock.sentinel.FAKE_DVD_PATH1,
                                              mock.sentinel.FAKE_DEST_PATH),
                                    mock.call(mock.sentinel.FAKE_DVD_PATH2,
                                              mock.sentinel.FAKE_DEST_PATH)])
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import timedelta
import json
import unittest
from urllib.parse import quote_plus
from airflow import configuration as conf
from airflow import settings
from airflow.api.common.experimental.trigger_dag import trigger_dag
from airflow.models import DagBag, DagRun, Pool, TaskInstance
from airflow.settings import Session
from airflow.utils.timezone import datetime, utcnow
from airflow.www import app as application
class TestBase(unittest.TestCase):
def setUp(self):
conf.load_test_config()
self.app, self.appbuilder = application.create_app(session=Session, testing=True)
self.app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///'
self.app.config['SECRET_KEY'] = 'secret_key'
self.app.config['CSRF_ENABLED'] = False
self.app.config['WTF_CSRF_ENABLED'] = False
self.client = self.app.test_client()
settings.configure_orm()
self.session = Session
class TestApiExperimental(TestBase):
@classmethod
def setUpClass(cls):
super(TestApiExperimental, cls).setUpClass()
session = Session()
session.query(DagRun).delete()
session.query(TaskInstance).delete()
session.commit()
session.close()
def setUp(self):
super(TestApiExperimental, self).setUp()
def tearDown(self):
session = Session()
session.query(DagRun).delete()
session.query(TaskInstance).delete()
session.commit()
session.close()
super(TestApiExperimental, self).tearDown()
def test_task_info(self):
url_template = '/api/experimental/dags/{}/tasks/{}'
response = self.client.get(
url_template.format('example_bash_operator', 'runme_0')
)
self.assertIn('"email"', response.data.decode('utf-8'))
self.assertNotIn('error', response.data.decode('utf-8'))
self.assertEqual(200, response.status_code)
response = self.client.get(
url_template.format('example_bash_operator', 'DNE')
)
self.assertIn('error', response.data.decode('utf-8'))
self.assertEqual(404, response.status_code)
response = self.client.get(
url_template.format('DNE', 'DNE')
)
self.assertIn('error', response.data.decode('utf-8'))
self.assertEqual(404, response.status_code)
def test_task_paused(self):
url_template = '/api/experimental/dags/{}/paused/{}'
response = self.client.get(
url_template.format('example_bash_operator', 'true')
)
self.assertIn('ok', response.data.decode('utf-8'))
self.assertEqual(200, response.status_code)
url_template = '/api/experimental/dags/{}/paused/{}'
response = self.client.get(
url_template.format('example_bash_operator', 'false')
)
self.assertIn('ok', response.data.decode('utf-8'))
self.assertEqual(200, response.status_code)
def test_trigger_dag(self):
url_template = '/api/experimental/dags/{}/dag_runs'
response = self.client.post(
url_template.format('example_bash_operator'),
data=json.dumps({'run_id': 'my_run' + utcnow().isoformat()}),
content_type="application/json"
)
self.assertEqual(200, response.status_code)
response = self.client.post(
url_template.format('does_not_exist_dag'),
data=json.dumps({}),
content_type="application/json"
)
self.assertEqual(404, response.status_code)
def test_trigger_dag_for_date(self):
url_template = '/api/experimental/dags/{}/dag_runs'
dag_id = 'example_bash_operator'
hour_from_now = utcnow() + timedelta(hours=1)
execution_date = datetime(hour_from_now.year,
hour_from_now.month,
hour_from_now.day,
hour_from_now.hour)
datetime_string = execution_date.isoformat()
# Test Correct execution
response = self.client.post(
url_template.format(dag_id),
data=json.dumps({'execution_date': datetime_string}),
content_type="application/json"
)
self.assertEqual(200, response.status_code)
dagbag = DagBag()
dag = dagbag.get_dag(dag_id)
dag_run = dag.get_dagrun(execution_date)
self.assertTrue(dag_run,
'Dag Run not found for execution date {}'
.format(execution_date))
# Test error for nonexistent dag
response = self.client.post(
url_template.format('does_not_exist_dag'),
data=json.dumps({'execution_date': execution_date.isoformat()}),
content_type="application/json"
)
self.assertEqual(404, response.status_code)
# Test error for bad datetime format
response = self.client.post(
url_template.format(dag_id),
data=json.dumps({'execution_date': 'not_a_datetime'}),
content_type="application/json"
)
self.assertEqual(400, response.status_code)
def test_task_instance_info(self):
url_template = '/api/experimental/dags/{}/dag_runs/{}/tasks/{}'
dag_id = 'example_bash_operator'
task_id = 'also_run_this'
execution_date = utcnow().replace(microsecond=0)
datetime_string = quote_plus(execution_date.isoformat())
wrong_datetime_string = quote_plus(
datetime(1990, 1, 1, 1, 1, 1).isoformat()
)
# Create DagRun
trigger_dag(dag_id=dag_id,
run_id='test_task_instance_info_run',
execution_date=execution_date)
# Test Correct execution
response = self.client.get(
url_template.format(dag_id, datetime_string, task_id)
)
self.assertEqual(200, response.status_code)
self.assertIn('state', response.data.decode('utf-8'))
self.assertNotIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag
response = self.client.get(
url_template.format('does_not_exist_dag', datetime_string,
task_id),
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for nonexistent task
response = self.client.get(
url_template.format(dag_id, datetime_string, 'does_not_exist_task')
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag run (wrong execution_date)
response = self.client.get(
url_template.format(dag_id, wrong_datetime_string, task_id)
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for bad datetime format
response = self.client.get(
url_template.format(dag_id, 'not_a_datetime', task_id)
)
self.assertEqual(400, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
def test_dagrun_status(self):
url_template = '/api/experimental/dags/{}/dag_runs/{}'
dag_id = 'example_bash_operator'
execution_date = utcnow().replace(microsecond=0)
datetime_string = quote_plus(execution_date.isoformat())
wrong_datetime_string = quote_plus(
datetime(1990, 1, 1, 1, 1, 1).isoformat()
)
# Create DagRun
trigger_dag(dag_id=dag_id,
run_id='test_task_instance_info_run',
execution_date=execution_date)
# Test Correct execution
response = self.client.get(
url_template.format(dag_id, datetime_string)
)
self.assertEqual(200, response.status_code)
self.assertIn('state', response.data.decode('utf-8'))
self.assertNotIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag
response = self.client.get(
url_template.format('does_not_exist_dag', datetime_string),
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag run (wrong execution_date)
response = self.client.get(
url_template.format(dag_id, wrong_datetime_string)
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for bad datetime format
response = self.client.get(
url_template.format(dag_id, 'not_a_datetime')
)
self.assertEqual(400, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
class TestPoolApiExperimental(TestBase):
@classmethod
def setUpClass(cls):
super(TestPoolApiExperimental, cls).setUpClass()
session = Session()
session.query(Pool).delete()
session.commit()
session.close()
def setUp(self):
super(TestPoolApiExperimental, self).setUp()
self.pools = []
for i in range(2):
name = 'experimental_%s' % (i + 1)
pool = Pool(
pool=name,
slots=i,
description=name,
)
self.session.add(pool)
self.pools.append(pool)
self.session.commit()
self.pool = self.pools[0]
def tearDown(self):
self.session.query(Pool).delete()
self.session.commit()
self.session.close()
super(TestPoolApiExperimental, self).tearDown()
def _get_pool_count(self):
response = self.client.get('/api/experimental/pools')
self.assertEqual(response.status_code, 200)
return len(json.loads(response.data.decode('utf-8')))
def test_get_pool(self):
response = self.client.get(
'/api/experimental/pools/{}'.format(self.pool.pool),
)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode('utf-8')),
self.pool.to_json())
def test_get_pool_non_existing(self):
response = self.client.get('/api/experimental/pools/foo')
self.assertEqual(response.status_code, 404)
self.assertEqual(json.loads(response.data.decode('utf-8'))['error'],
"Pool 'foo' doesn't exist")
def test_get_pools(self):
response = self.client.get('/api/experimental/pools')
self.assertEqual(response.status_code, 200)
pools = json.loads(response.data.decode('utf-8'))
self.assertEqual(len(pools), 2)
for i, pool in enumerate(sorted(pools, key=lambda p: p['pool'])):
self.assertDictEqual(pool, self.pools[i].to_json())
def test_create_pool(self):
response = self.client.post(
'/api/experimental/pools',
data=json.dumps({
'name': 'foo',
'slots': 1,
'description': '',
}),
content_type='application/json',
)
self.assertEqual(response.status_code, 200)
pool = json.loads(response.data.decode('utf-8'))
self.assertEqual(pool['pool'], 'foo')
self.assertEqual(pool['slots'], 1)
self.assertEqual(pool['description'], '')
self.assertEqual(self._get_pool_count(), 3)
def test_create_pool_with_bad_name(self):
for name in ('', ' '):
response = self.client.post(
'/api/experimental/pools',
data=json.dumps({
'name': name,
'slots': 1,
'description': '',
}),
content_type='application/json',
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
json.loads(response.data.decode('utf-8'))['error'],
"Pool name shouldn't be empty",
)
self.assertEqual(self._get_pool_count(), 2)
def test_delete_pool(self):
response = self.client.delete(
'/api/experimental/pools/{}'.format(self.pool.pool),
)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode('utf-8')),
self.pool.to_json())
self.assertEqual(self._get_pool_count(), 1)
def test_delete_pool_non_existing(self):
response = self.client.delete(
'/api/experimental/pools/foo',
)
self.assertEqual(response.status_code, 404)
self.assertEqual(json.loads(response.data.decode('utf-8'))['error'],
"Pool 'foo' doesn't exist")
if __name__ == '__main__':
unittest.main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
# yet another attempt at a proper UnixProcess able to cope with buffer limiting
# both on input to the subprocess and on output to the destination inbox
"""\
===================================================
Unix sub processes with communication through pipes
===================================================
UnixProcess allows you to start a separate process and send data to it and
receive data from it using the standard input/output/error pipes and optional
additional named pipes.
This component works on *nix platforms only. It is almost certainly not Windows
compatible. Tested only under Linux.
Example Usage
-------------
Using the 'wc' word count GNU util to count the number of lines in some data::
Pipeline( RateControlledFileReader(filename, ... ),
UnixProcess("wc -l"),
ConsoleEchoer(),
).run()
Feeding separate audio and video streams to ffmpeg, and taking the encoded
output::
Graphline(
ENCODER = UnixProcess( "ffmpeg -i audpipe -i vidpipe -",
inpipes = { "audpipe":"audio",
"vidpipe":"video",
},
boxsizes = { "audio":2, "video":2 }
),
VIDSOURCE = MaxSpeedFileReader(...),
        AUDSOURCE = MaxSpeedFileReader(...),
SINK = SimpleFileWriter("encodedvideo"),
linkages = {
("VIDSOURCE","outbox") : ("ENCODER","video"),
("AUDSOURCE","outbox") : ("ENCODER","audio"),
("ENCODER","outbox") : ("SINK", "inbox"),
}
).run()
Behaviour
---------
At initialisation, specify:
* the command to invoke the sub process
* the size limit for internal buffers
* additional named input and output pipes
* box size limits for any input pipe's inbox, including "inbox" for STDIN
Named input pipes must all use different inbox names. They must not use "inbox"
or "control". Named output pipes may use any outbox name they wish. More than
one named output pipe can use the same outbox, including "outbox".
The pipe files needed for named pipes are created automatically at activation
and are deleted at termination.
Activate UnixProcess and the sub process will be started. Use the inboxes and
outboxes of UnixProcess to communicate with the sub process. For example::
UnixProcess( "ffmpeg -i /tmp/inpipe -f wav /tmp/outpipe",
inpipes = { "/tmp/inpipe" :"in" },
outpipes = { "/tmp/outpipe":"out" },
)
________________________________________________
| UnixProcess |
| _______________________ |
| | "ffmpeg -i ..." | |
---> "inbox" ---> STDIN STDOUT ---> "outbox" --->
| | STDERR ---> "error" --->
| | | |
---> "in" ---> "/tmp/inpipe" "/tmp/outpipe" ---> "out" --->
| |_______________________| |
|________________________________________________|
Send binary string data to the "inbox" inbox and it will be sent to STDIN of
the process.
Binary string data from STDOUT and STDERR is sent out of the "outbox" and
"error" outboxes respectively.
To send to a named pipe, send binary string data to the respective inbox you
specified.
Data written by the sub process to a named output pipe will be sent out of the
respective outbox.
The specified buffering size sets a maximum limit on the amount of data that
can be buffered on the inputs and outputs to and from the sub process. It also
determines the chunk size in which data coming from the sub process will emerge.
Note therefore that data may languish in an output buffer indefinitely until
the process terminates. Do not assume that data coming from a sub process will
emerge the moment it is generated.
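For example, a smaller buffer can be requested at construction time (the 4096
byte figure below is purely illustrative, not a recommendation)::
    UnixProcess("wc -l", buffersize=4096)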
UnixProcess will leave data in its inboxes until it is able to send it to the
required pipe. If a destination box is full (noSpaceInBox exception) then
UnixProcess will wait and retry until it succeeds in sending the data.
If the sub process closes its pipes (STDIN, STDOUT, STDERR) then UnixProcess
will close its named input and output pipes too, and will send a
producerFinished message out of its "signal" outbox then immediately terminate.
If a producerFinished message is received on the "control" inbox, UnixProcess
will finish passing any pending data waiting in inboxes into the sub process and
will finish passing on any pending data waiting to come from the sub process
onto destination outboxes. Once this is complete, UnixProcess will close all
pipes and send a producerFinished message out of its "signal" outbox and
immediately terminate.
If a shutdownMicroprocess message is received on the "control" inbox,
UnixProcess will close all pipes as soon as possible and send the message on
out of its "signal" outbox and immediately terminate.
How does it work?
-----------------
The UnixProcess component itself is primarily just the initiator of the sub
process and a container for other child components that handle the actual I/O
with pipes. It uses _ToFileHandle and _FromFileHandle components for each input
and output pipe respectively.
For each specified named pipe, the specified pipe file is created if required
(using mkfifo).
The shutdown signalling boxes of all child components are daisy chained.
Shutdown messages sent to the "control" inbox of UnixProcess are routed to the
"control" inbox of the component handling STDIN. The shutdown message is then
propagated to named output pipes and then named input pipes.
If STDOUT closes, it causes STDERR to close. If STDERR closes, then the shutdown
message is propagated to STDIN and then on to the named pipes as described above.
When the process exits, it is assumed that STDIN, STDOUT and STDERR will close
by themselves in due course. However, an explicit shutdown message is sent to the
named pipes.
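A condensed sketch of that shutdown daisy-chaining, lifted from the linkage
set-up in main() below (simplified; the chain through the named pipes built by
setupNamedOutPipes()/setupNamedInPipes() is omitted)::
    # a shutdown ordered from outside goes straight to the STDIN handler
    self.link((self,"control"), (STDIN,"control"), passthrough=1)
    # STDOUT or STDERR closing also closes STDIN...
    self.link((STDOUT,"signal"), (STDIN,"control"))
    self.link((STDERR,"signal"), (STDIN,"control"))
    # ...and STDIN closing is then propagated to the first named input pipe
    self.link((STDIN,"signal"), (firstinpipe,"control"))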
XXX Fix Me
----------
If UnixProcess is terminated by receiving shutdown messages, it doesn't
currently explicitly terminate the sub process.
"""
from Axon.Component import component
from Axon.Ipc import shutdownMicroprocess, producerFinished
from Axon.AxonExceptions import noSpaceInBox
from Kamaelia.IPC import newReader, newWriter
from Kamaelia.IPC import removeReader, removeWriter
from Kamaelia.Internet.Selector import Selector
import subprocess
import fcntl
import os
import sys
class UnixProcess(component):
"""\
UnixProcess(command[,buffersize][,outpipes][,inpipes][,boxsizes]) -> new UnixProcess component.
Starts the specified command as a separate process. Data can be sent to
stdin and received from stdout. Named pipes can also be created for extra
channels to get data to and from the process.
Keyword arguments::
- command -- command line string that will invoke the subprocess
- buffersize -- bytes size of buffers on the pipes to and from the process (default=32768)
- outpipes -- dict mapping named-pipe-filenames to outbox names (default={})
- inpipes -- dict mapping named-pipe-filenames to inbox names (default={})
- boxsizes -- dict mapping inbox names to box sizes (default={})
"""
Inboxes = { "inbox" : "Binary string data to go to STDIN of the process.",
"control" : "Shutdown signalling",
}
Outboxes = { "outbox" : "Binary string data from STDOUT of the process",
"error" : "Binary string data from STDERR of the process",
"signal" : "Shutdown signalling",
"_shutdownPipes" : "For shutting down any named pipes used for output"
}
def __init__(self, command, buffersize=32768, outpipes={}, inpipes={}, boxsizes={}):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
# create additional outboxes and inboxes for the additional named pipes
# requested. Doing this before the super() call.
# XXX HACKY - ought to be a better way
for outpipe,outboxname in outpipes.items():
self.Outboxes[outboxname] = "Output from named pipe: "+outpipe
for inpipe,inboxname in inpipes.items():
self.Inboxes[inboxname] = "Input to named pipe: "+inpipe
super(UnixProcess,self).__init__()
self.command = command
self.buffersize = buffersize
self.outpipes = outpipes
self.inpipes = inpipes
self.boxsizes = boxsizes
def main(self):
"""main loop"""
# SETUP
# lets add any named pipes requested
# passing an outbox which will send a shutdown message, so the pipe
# handlers we create can daisy chain shutdowns together
pipeshutdownbox = (self,"_shutdownPipes")
pipeshutdownbox = self.setupNamedOutPipes(pipeshutdownbox)
pipeshutdownbox, firstinpipe = self.setupNamedInPipes(pipeshutdownbox)
# set up the subprocess
p = subprocess.Popen(self.command,
shell=True,
bufsize=self.buffersize,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr = subprocess.PIPE,
close_fds=True)
# sort standard IO
makeNonBlocking( p.stdin.fileno() )
makeNonBlocking( p.stdout.fileno() )
makeNonBlocking( p.stderr.fileno() )
# set up child components to handle the IO
STDIN = _ToFileHandle(p.stdin)
STDOUT = _FromFileHandle(p.stdout, self.buffersize)
STDERR = _FromFileHandle(p.stderr, self.buffersize)
        # make their names more useful for debugging
STDIN.name = "[UnixProcess stdin] "+STDIN.name
STDOUT.name = "[UnixProcess stdout] "+STDOUT.name
STDERR.name = "[UnixProcess stderr] "+STDERR.name
# stdin from inbox; stdout and stderr to outboxes
self.link((self,"inbox"), (STDIN,"inbox"), passthrough=1),
self.link((STDOUT,"outbox"), (self,"outbox"), passthrough=2),
self.link((STDERR,"outbox"), (self,"error"), passthrough=2),
# if outputs close, then close input too
self.link((STDOUT,"signal"), (STDIN,"control")),
self.link((STDERR,"signal"), (STDIN,"control")),
# if ordered from outside, then close input
self.link((self,"control"), (STDIN, "control"), passthrough=1),
# set box size limits
if "inbox" in self.boxsizes:
STDIN.inboxes['inbox'].setSize(self.boxsizes['inbox'])
# wire up such that if standard input closes, then it should cause all
# other named pipes sending to the process to close down
if firstinpipe is not None:
self.link((STDIN,"signal"),(firstinpipe,"control"))
self.addChildren(STDIN,STDOUT,STDERR)
# GO!
for child in self.childComponents():
child.activate()
shutdownMsg = producerFinished(self)
while not self.childrenDone():
# if the process has exited, make sure we shutdown all the pipes
if p.poll() is not None:
self.send(producerFinished(self), "_shutdownPipes")
else:
self.pause()
yield 1
# SHUTDOWN
self.send(shutdownMsg,"signal")
# delete any named pipes
for outpipename in self.outpipes.keys():
os.remove(outpipename)
for inpipename in self.inpipes.keys():
os.remove(inpipename)
def setupNamedOutPipes(self, pipeshutdown):
# lets add any named output pipes requested
for (outpipename,outboxname) in self.outpipes.items():
# create the pipe
try:
os.mkfifo(outpipename)
            except OSError:
                # the pipe file probably already exists
                pass
# open the file handle for reading
f = open(outpipename, "rb+",self.buffersize)
# create the handler component to receive from that pipe
PIPE = _FromFileHandle(f, self.buffersize)
self.link((PIPE,"outbox"), (self,outboxname), passthrough=2)
            # wire up an inbox for it, and daisy chain its control box from the
# previous pipe's signal box
self.link(pipeshutdown,(PIPE,"control"))
pipeshutdown=(PIPE,"signal")
self.addChildren(PIPE)
# give it a useful name (for debugging), and make it our child
PIPE.name = "[UnixProcess outpipe '"+outpipename+"'] "+PIPE.name
return pipeshutdown
def setupNamedInPipes(self,pipeshutdown):
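        """
        Creates a _ToFileHandle child for every named input pipe requested,
        wiring the corresponding inbox through to it and daisy chaining the
        shutdown signalling. Returns the updated (component, "signal") pair and
        the first pipe handler created (or None if there were none).
        """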
# lets add any named input pipes requested
firstinpipe = None
for (inpipename,inboxname) in self.inpipes.items():
# create the pipe
try:
os.mkfifo(inpipename)
            except OSError:
                pass   # the named pipe may already exist
# open the file handle for writing
f = open(inpipename, "wb+", self.buffersize)
# create the handler component to send to that pipe
PIPE = _ToFileHandle(f)
# note it down if this is the first
if firstinpipe is None:
firstinpipe = PIPE
            # wire up an inbox for it, and daisy chain its control box from the
            # previous pipe's signal box
self.link((self,inboxname), (PIPE,"inbox"), passthrough=1)
self.link(pipeshutdown,(PIPE,"control"))
pipeshutdown=(PIPE,"signal")
# limit its box size (if specified)
if inboxname in self.boxsizes:
PIPE.inboxes["inbox"].setSize(self.boxsizes[inboxname])
self.addChildren(PIPE)
# give it a useful name (for debugging)
PIPE.name = "[UnixProcess inpipe '"+inpipename+"'] "+PIPE.name
return pipeshutdown, firstinpipe
def childrenDone(self):
"""Unplugs any children that have terminated, and returns true if there are no
running child components left (ie. their microproceses have finished)
"""
for child in self.childComponents():
if child._isStopped():
self.removeChild(child) # deregisters linkages for us
return 0==len(self.childComponents())
def makeNonBlocking(fd):
"""Set a file handle to non blocking behaviour on read & write"""
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NDELAY)
class ShutdownNow(Exception):
    """Internal exception used by the file handle components to break out of their main loops on shutdown."""
    pass
class _ToFileHandle(component):
"""\
_ToFileHandle(fileHandle) -> new _ToFileHandle component.
Send data to this component and it will be written to the specified file
handle, in non-blocking mode. Uses the Selector service to wake. Leaves data
in the inbox when blocked.
Keyword arguments::
- fileHandle -- file handle open for binary mode writing
"""
Inboxes = { "inbox" : "Binary string data to be written to the file handle",
"control" : "Shutdown signalling",
"ready" : "Notifications from the Selector",
}
Outboxes = { "outbox" : "NOT USED",
"signal" : "Shutdown signalling",
"selector" : "For communication to the Selector",
}
def __init__(self, fileHandle):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(_ToFileHandle,self).__init__()
self.fh = fileHandle
makeNonBlocking(self.fh)
self.shutdownMsg = None
def checkShutdown(self,noNeedToWait):
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg,shutdownMicroprocess):
self.shutdownMsg=msg
raise "STOP"
elif isinstance(msg, producerFinished):
if not isinstance(self.shutdownMsg, shutdownMicroprocess):
self.shutdownMsg=msg
else:
pass
if self.shutdownMsg and noNeedToWait:
raise "STOP"
def main(self):
selectorService, selectorShutdownService, S = Selector.getSelectorServices(self.tracker)
if S:
S.activate()
yield 1
yield 1
yield 1
self.link((self, "selector"), (selectorService))
self.send(newWriter(self,((self, "ready"), self.fh)), "selector")
dataPending=""
try:
while 1:
# no data pending
if dataPending=="":
while not self.dataReady("inbox"):
self.checkShutdown(noNeedToWait=True)
self.pause()
yield 1
dataPending = self.recv("inbox")
# now try to send it
try:
#self.fh.write(dataPending)
byteswritten = os.write(self.fh.fileno(),dataPending)
if byteswritten >= 0:
dataPending = dataPending[byteswritten:]
# dataPending=""
                except (OSError, IOError):
# data pending
# wait around until stdin is ready
if not self.dataReady("ready"):
self.send(newWriter(self,((self, "ready"), self.fh)), "selector")
while not self.dataReady("ready"):
self.checkShutdown(noNeedToWait=False)
self.pause()
yield 1
self.recv("ready")
self.checkShutdown(noNeedToWait=False)
except "STOP":
pass # ordered to shutdown!
self.send(removeWriter(self,(self.fh)), "selector")
try:
self.fh.close()
except:
pass
self.send(self.shutdownMsg,"signal")
class _FromFileHandle(component):
"""\
_FromFileHandle(fileHandle[,maxReadChunkSize]) -> new _FromFileHandle component.
    Reads binary string data from the specified file handle in non-blocking mode.
    Uses the Selector service to wake when blocked. Will wait if the destination
    box is full.
Keyword arguments::
- fileHandle -- File handle to read data from
- maxReadChunkSize -- Maximum number of bytes read at a time (default=32768)
"""
Inboxes = { "inbox" : "NOT USED",
"control" : "Shutdown signalling",
"ready" : "Notifications from the Selector service",
}
Outboxes = { "outbox" : "Binary string data read from the file handle",
"signal" : "Shutdown signalling",
"selector" : "Requests to the Selector service",
}
def __init__(self, fileHandle,maxReadChunkSize=32768):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(_FromFileHandle,self).__init__()
self.fh = fileHandle
makeNonBlocking(self.fh)
self.maxReadChunkSize = maxReadChunkSize
if self.maxReadChunkSize <= 0:
self.maxReadChunkSize=32768
self.shutdownMsg = None
def checkShutdown(self):
# ignore producerFinished messages, as they're meaningless to us - we're a source
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg,shutdownMicroprocess):
self.shutdownMsg=msg
raise "STOP"
elif isinstance(msg,producerFinished):
self.shutdownMsg=msg
return self.shutdownMsg
def main(self):
selectorService, selectorShutdownService, S = Selector.getSelectorServices(self.tracker)
if S:
S.activate()
yield 1
yield 1
yield 1
self.link((self, "selector"), (selectorService))
dataPending = ""
self.shutdownMsg = None
try:
while 1:
while dataPending:
self.checkShutdown()
try:
self.send(dataPending,"outbox")
dataPending=""
except noSpaceInBox:
self.pause()
yield 1
while not dataPending:
try:
#dataPending=self.fh.read(self.maxReadChunkSize)
dataPending = os.read(self.fh.fileno(), self.maxReadChunkSize)
if dataPending=="":
raise "STOP"
                    except (OSError, IOError):
# no data available yet, need to wait
if self.checkShutdown():
raise "STOP"
if self.dataReady("ready"):
self.recv("ready")
else:
self.send(newReader(self,((self, "ready"), self.fh)), "selector")
while not self.dataReady("ready") and not self.checkShutdown():
self.pause()
yield 1
if self.dataReady("ready"):
self.recv("ready")
except "STOP":
pass # ordered to shutdown!
self.send(removeReader(self,(self.fh)), "selector")
try:
self.fh.close()
except:
pass
yield 1
yield 1
if not self.shutdownMsg:
self.send(producerFinished(self), "signal")
else:
self.send(self.shutdownMsg,"signal")
__kamaelia_components__ = ( UnixProcess, )
if __name__=="__main__":
class ChargenComponent(component):
def main(self):
import time
ts = t = time.time()
b = 0
i=0
while time.time() - t <0.1:
yield 1
self.send("hello%5d\n" % i, "outbox")
i+=1
b += len("hello12345\n")
self.send("byebye!!!!!\n", "outbox")
b+=len("byebye!!!!!\n")
self.send(producerFinished(), "signal")
print "total sent", b
from Axon.ThreadedComponent import threadedcomponent
class LineSplit(component):
def main(self):
self.inboxes['inbox'].setSize(1)
while 1:
while not self.dataReady("inbox"):
self.pause()
yield 1
msg = self.recv("inbox").split("\n")
while msg:
try:
self.send(msg[0],"outbox")
msg.pop(0)
except noSpaceInBox:
self.pause()
yield 1
class Chunk(component):
def __init__(self,chunksize):
super(Chunk,self).__init__()
self.chunksize=chunksize
def main(self):
self.inboxes['inbox'].setSize(1)
while 1:
while not self.dataReady("inbox"):
self.pause()
yield 1
msg = self.recv("inbox")
for i in range(0,len(msg),self.chunksize):
send = msg[i:i+self.chunksize]
while 1:
try:
self.send(send,"outbox")
break
except noSpaceInBox:
self.pause()
yield 1
yield 1
class RateLimiter(threadedcomponent):
def __init__(self,rate):
super(RateLimiter,self).__init__(queuelengths=1)
self.interval=1.0/rate
self.inboxes['inbox'].setSize(1)
def main(self):
import time
while 1:
time.sleep(self.interval)
while not self.dataReady("inbox"):
self.pause()
msg = self.recv("inbox")
while 1:
try:
self.send(msg,"outbox")
break
except noSpaceInBox:
self.pause()
class CumulateSize(component):
def main(self):
i=0
while 1:
while self.dataReady("inbox"):
i += len(self.recv("inbox"))
self.send("%10d\n" % i,"outbox")
self.pause()
yield 1
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Chassis.Graphline import Graphline
from Kamaelia.Util.Console import ConsoleEchoer
import os
# test="rate limit output"
# test="rate limited input"
# test="reached end of output"
# test="outpipes"
test="inpipes"
if test=="rate limit output":
Pipeline(
UnixProcess("cat /dev/zero",32*1024*1024),
LineSplit(),
Chunk(10),
RateLimiter(10),
CumulateSize(),
ConsoleEchoer(forwarder=True)
).run()
elif test=="rate limited input":
ratelimiter=RateLimiter(10)
ratelimiter.inboxes['inbox'].setSize(None)
Pipeline(
ChargenComponent(),
ratelimiter,
UnixProcess("cat -",32),
ConsoleEchoer(forwarder=True)
).run()
elif test=="reached end of output":
Pipeline(
ChargenComponent(),
UnixProcess("wc",32),
ConsoleEchoer(forwarder=True)
).run()
elif test=="outpipes":
try:
os.remove("/tmp/tmppipe")
except OSError:
pass
Graphline(
SRC = ChargenComponent(),
UXP = UnixProcess("cat - > /tmp/tmppipe",outpipes={"/tmp/tmppipe":"output"}),
DST = ConsoleEchoer(),
linkages = {
("SRC","outbox") : ("UXP","inbox"),
("UXP","output") : ("DST","inbox"),
("SRC","signal") : ("UXP","control"),
("UXP","signal") : ("DST","control"),
}
).run()
elif test=="inpipes":
try:
os.remove("/tmp/tmppipe")
except OSError:
pass
Graphline(
SRC = ChargenComponent(),
UXP = UnixProcess("cat /tmp/tmppipe",inpipes={"/tmp/tmppipe":"input"}),
DST = ConsoleEchoer(),
linkages = {
("SRC","outbox") : ("UXP","input"),
("UXP","outbox") : ("DST","inbox"),
("SRC","signal") : ("UXP","control"),
("UXP","signal") : ("DST","control"),
}
).run()
|
|
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
"""
A plugin for composing RGB images from constituent monochrome images.
**Plugin Type: Local**
``Compose`` is a local plugin, which means it is associated with a
channel. An instance can be opened for each channel.
**Usage**
Start the ``Compose`` plugin from the "Operation->RGB" (below) or
"Plugins->RGB" (above) menu. The tab should show up under the
"Dialogs" tab in the viewer to the right as "IMAGE:Compose".
1. Select the kind of composition you want to make from the "Compose Type"
drop down: "RGB" for composing three monochrome images into a color
image, "Alpha" to compose a series of images as layers with different
alpha values for each layer.
2. Press "New Image" to start composing a new image.
***For RGB composition***
1. Drag your three constituent images that will make up the R, G, and B
planes to the "Preview" window -- drag them in the order R (red),
G (green), and B (blue). Alternatively, you can load the images into
the channel viewer one by one and after each one pressing "Insert from
Channel" (similarly, do these in the order of R, G, and B).
In the plugin GUI, the R, G, and B images should show up as three slider
controls in the "Layers" area of the plugin, and the Preview should show
a low resolution version of how the composite image looks with the sliders
set.
.. figure:: figures/compose-rgb.png
:width: 800px
:align: center
:alt: Composing an RGB image
Composing an RGB Image.
2. Play with the alpha levels of each layer using the sliders in the
``Compose`` plugin; as you adjust a slider the preview image should
update.
3. When you see something you like, you can save it to a file using the
"Save As" button (use "jpeg" or "png" as the file extension), or insert
it into the channel using the "Save to Channel" button.
***For Alpha composition***
For Alpha-type composition the images are just combined in the order shown
in the stack, with Layer 0 being the bottom layer, and successive layers
stacked on top. Each layer's alpha level is adjustable by a slider in the
same manner as discussed above.
.. figure:: figures/compose-alpha.png
:width: 800px
:align: center
:alt: Alpha-composing an image
Alpha-composing an image.
1. Drag your N constituent images that will make up the layers to the
"Preview" window, or load the images into the channel viewer one by
one and after each one pressing "Insert from Channel" (the first image
will be at the bottom of the stack--layer 0).
2. Play with the alpha levels of each layer using the sliders in the
``Compose`` plugin; as you adjust a slider the preview image should
update.
3. When you see something you like, you can save it to a file using the
"Save As" button (use "fits" as the file extension), or insert it into
the channel using the "Save to Channel" button.
***General Notes***
- The preview window is just a ginga widget, so all the usual bindings
apply; you can set color maps, cut levels, etc. with the mouse and key
bindings.
"""
import os
from ginga.gw import Widgets, Viewers
from ginga.misc import Bunch
from ginga import RGBImage, LayerImage, AstroImage
from ginga import GingaPlugin
from PIL import Image
__all__ = ['Compose']
class RGBComposeImage(RGBImage.RGBImage, LayerImage.LayerImage):
def __init__(self, *args, **kwargs):
RGBImage.RGBImage.__init__(self, *args, **kwargs)
LayerImage.LayerImage.__init__(self)
class AlphaComposeImage(AstroImage.AstroImage, LayerImage.LayerImage):
def __init__(self, *args, **kwargs):
AstroImage.AstroImage.__init__(self, *args, **kwargs)
LayerImage.LayerImage.__init__(self)
class Compose(GingaPlugin.LocalPlugin):
def __init__(self, fv, fitsimage):
# superclass defines some variables for us, like logger
super(Compose, self).__init__(fv, fitsimage)
self.limage = None
self.images = []
self.count = 0
self._wd = 300
self._ht = 200
self.pct_reduce = 0.1
self._split_sizes = [600, 200, 200]
self.layertag = 'compose-canvas'
self.dc = fv.get_draw_classes()
canvas = self.dc.DrawingCanvas()
canvas.set_callback('drag-drop', self.drop_file_cb)
self.canvas = canvas
self.gui_up = False
def build_gui(self, container):
top = Widgets.VBox()
top.set_border_width(4)
vbox = Widgets.VBox()
vbox.set_border_width(4)
vbox.set_spacing(2)
fr = Widgets.Frame("Compositing")
captions = (("Compose Type:", 'label', "Compose Type", 'combobox'),
("New Image", 'button', "Insert from Channel", 'button'),
)
w, b = Widgets.build_info(captions)
self.w.update(b)
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
combobox = b.compose_type
index = 0
for name in ('Alpha', 'RGB'):
combobox.append_text(name)
index += 1
combobox.set_index(1)
b.new_image.add_callback('activated', lambda w: self.new_cb())
b.new_image.set_tooltip("Start a new composite image")
b.insert_from_channel.add_callback('activated', lambda w: self.insert_cb())
b.insert_from_channel.set_tooltip("Insert channel image as layer")
zi = Viewers.CanvasView(logger=None)
zi.set_desired_size(self._wd, self._ht)
zi.enable_autozoom('on')
zi.enable_autocuts('off')
zi.cut_levels(0, 255)
zi.set_bg(0.4, 0.4, 0.4)
zi.set_name('compose_image')
self.preview_image = zi
bd = zi.get_bindings()
bd.enable_zoom(True)
bd.enable_pan(True)
bd.enable_flip(True)
bd.enable_cuts(True)
bd.enable_cmap(True)
iw = Viewers.GingaViewerWidget(zi)
iw.resize(self._wd, self._ht)
zi.get_canvas().add(self.canvas)
self.canvas.set_surface(zi)
self.canvas.ui_set_active(True, viewer=zi)
fr = Widgets.Frame("Preview")
fr.set_widget(iw)
vpaned = Widgets.Splitter(orientation='vertical')
self.w.splitter = vpaned
vpaned.add_widget(fr)
# spacer
vpaned.add_widget(Widgets.Label(''))
fr = Widgets.Frame("Layers")
self.w.scales = fr
fr.set_widget(Widgets.VBox())
vpaned.add_widget(fr)
vpaned.set_sizes(self._split_sizes)
vbox.add_widget(vpaned, stretch=1)
captions = (("Save Image As", 'button', "Save Path", 'entry'),
("Save to Channel", 'button'),
)
w, b = Widgets.build_info(captions)
self.w.update(b)
b.save_to_channel.add_callback('activated', lambda w: self.save_to_channel_cb())
b.save_to_channel.set_tooltip("Save composite image to channel")
b.save_image_as.add_callback('activated', lambda w: self.save_as_cb())
b.save_path.add_callback('activated', lambda *args: self.save_as_cb())
vbox.add_widget(w, stretch=0)
top.add_widget(vbox, stretch=1)
btns = Widgets.HBox()
btns.set_border_width(4)
btns.set_spacing(4)
btn = Widgets.Button("Close")
btn.add_callback('activated', lambda w: self.close())
btns.add_widget(btn)
btn = Widgets.Button("Help")
btn.add_callback('activated', lambda w: self.help())
btns.add_widget(btn, stretch=0)
btns.add_widget(Widgets.Label(''), stretch=1)
top.add_widget(btns, stretch=0)
container.add_widget(top, stretch=1)
self.gui_up = True
def _gui_config_layers(self):
# remove all old scales
self.logger.debug("removing layer alpha controls")
self.w.scales.remove_all()
self.logger.debug("building layer alpha controls")
# construct a new vbox of alpha controls
captions = []
num_layers = self.limage.num_layers()
for i in range(num_layers):
layer = self.limage.get_layer(i)
captions.append((layer.name + ':', 'label', 'layer_%d' % i, 'hscale'))
w, b = Widgets.build_info(captions)
self.w.update(b)
for i in range(num_layers):
layer = self.limage.get_layer(i)
adj = b['layer_%d' % (i)]
lower, upper = 0, 100
adj.set_limits(lower, upper, incr_value=1)
#adj.set_decimals(2)
adj.set_value(int(layer.alpha * 100.0))
adj.set_tracking(True)
adj.add_callback('value-changed', self.set_opacity_cb, i)
self.logger.debug("adding layer alpha controls")
self.w.scales.set_widget(w)
def new_cb(self):
#self.fitsimage.clear()
name = "composite%d" % (self.count)
self.limage = RGBComposeImage(logger=self.logger, order='RGB')
self.images = []
# Alpha or RGB composition?
index = self.w.compose_type.get_index()
if index == 0:
self.limage.compose = 'alpha'
else:
self.limage.compose = 'rgb'
self._gui_config_layers()
self.limage.set(name=name, nothumb=True)
def _get_layer_attributes(self, limage):
# Get layer name
idx = limage.num_layers()
if limage.compose == 'rgb':
idx = min(idx, 2)
names = ['Red', 'Green', 'Blue']
name = names[idx]
else:
name = 'layer%d' % (idx)
# Get alpha
alpha = 1.0
bnch = Bunch.Bunch(name=name, alpha=alpha, idx=idx)
return bnch
def make_reduced_image(self, image):
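        """Return a low-resolution RGBImage copy of `image`, scaled down by
        `self.pct_reduce`, suitable for use as a preview layer."""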
wd, ht = image.get_size()[:2]
res = image.get_scaled_cutout_basic(0, 0, wd, ht,
self.pct_reduce, self.pct_reduce)
sm_img = RGBImage.RGBImage(data_np=res.data, order=image.order)
return sm_img
def insert_image(self, image):
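        """Append `image` as the next layer of the composite being built
        (starting a new composite if necessary), then rebuild the alpha
        sliders and refresh the preview viewer."""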
if self.limage is None:
self.new_cb()
nlayers = self.limage.num_layers()
if (self.limage.compose == 'rgb') and (nlayers >= 3):
self.fv.show_error("There are already 3 layers")
return
elif nlayers == 0:
# populate metadata from first layer
metadata = image.get_metadata()
self.limage.update_metadata(metadata)
self.images.append(image)
sm_img = self.make_reduced_image(image)
attrs = self._get_layer_attributes(self.limage)
self.limage.insert_layer(attrs.idx, sm_img, name=attrs.name,
alpha=attrs.alpha)
self._gui_config_layers()
self.logger.debug("setting layer image")
self.preview_image.set_image(self.limage)
def insert_cb(self):
image = self.fitsimage.get_image()
self.insert_image(image)
def drop_file_cb(self, viewer, paths):
self.logger.info("dropped files: %s" % str(paths))
for path in paths[:3]:
image = self.fv.load_image(path)
self.insert_image(image)
return True
def create_image(self):
# create new composed image
if self.limage.compose == 'rgb':
fimage = RGBComposeImage(logger=self.logger, order='RGB')
else:
fimage = AlphaComposeImage(logger=self.logger)
fimage.compose = self.limage.compose
name = "composite%d" % (self.count)
self.count += 1
# copy metadata
metadata = self.images[0].get_metadata()
fimage.update_metadata(metadata)
# insert original full-size images into new layer image
# only compose at the end
for i in range(self.limage.num_layers()):
layer = self.limage.get_layer(i)
fimage.insert_layer(i, self.images[i], name=layer.name,
alpha=layer.alpha, compose=False)
fimage.compose_layers()
fimage.set(name=name)
return fimage
def save_to_channel_cb(self):
fimage = self.create_image()
# and drop it in the channel
self.fv.add_image(fimage.get('name'), fimage)
def set_opacity_cb(self, w, val, idx):
alpha = val / 100.0
self.limage.set_alpha(idx, alpha)
def _alphas_controls_to_layers(self):
self.logger.debug("updating layers in %s from controls" % self.limage)
num_layers = self.limage.num_layers()
vals = []
for i in range(num_layers):
alpha = self.w['layer_%d' % i].get_value() / 100.0
vals.append(alpha)
self.logger.debug("%d: alpha=%f" % (i, alpha))
self.limage.set_alphas(vals)
def _alphas_layers_to_controls(self):
self.logger.debug("updating controls from %s" % self.limage)
num_layers = self.limage.num_layers()
for i in range(num_layers):
layer = self.limage.get_layer(i)
self.logger.debug("%d: alpha=%f" % (i, layer.alpha))
ctrlname = 'layer_%d' % (i)
if ctrlname in self.w:
self.w[ctrlname].set_value(layer.alpha * 100.0)
def save_alpha_as_file(self, path):
fimage = self.create_image()
fimage.save_as_file(path)
def save_rgb_as_file(self, path):
fimage = self.create_image()
data = fimage.get_data()
# Save image using PIL
p_image = Image.fromarray(data)
p_image.save(path)
def save_as_cb(self):
if self.limage is None:
self.fv.show_error("Please create a composite image first.")
return
path = str(self.w.save_path.get_text()).strip()
if not path.startswith('/'):
path = os.path.join('.', path)
if self.limage.compose == 'rgb':
self.fv.nongui_do(self.fv.error_wrap, self.save_rgb_as_file, path)
else:
self.fv.nongui_do(self.fv.error_wrap, self.save_alpha_as_file, path)
def close(self):
self.fv.stop_local_plugin(self.chname, str(self))
return True
def start(self):
self.resume()
def pause(self):
pass
def resume(self):
pass
def stop(self):
self._split_sizes = self.w.splitter.get_sizes()
self.limage = None
self.images = []
self.fv.show_status("")
self.gui_up = False
def redo(self):
pass
def __str__(self):
return 'compose'
# END
|
|
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid as stdlib_uuid
from oslo_serialization import jsonutils
import webob
from nova.api.openstack.compute import views
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import matchers
NS = {
'atom': 'http://www.w3.org/2005/Atom',
'ns': 'http://docs.openstack.org/common/api/v1.0'
}
EXP_LINKS = {
'v2.0': {
'html': 'http://docs.openstack.org/',
},
'v2.1': {
'html': 'http://docs.openstack.org/'
},
}
EXP_VERSIONS = {
"v2.0": {
"id": "v2.0",
"status": "SUPPORTED",
"version": "",
"min_version": "",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "describedby",
"type": "text/html",
"href": EXP_LINKS['v2.0']['html'],
},
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json;version=2",
},
],
},
"v2.1": {
"id": "v2.1",
"status": "CURRENT",
"version": "2.5",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2.1/",
},
{
"rel": "describedby",
"type": "text/html",
"href": EXP_LINKS['v2.1']['html'],
},
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json;version=2.1",
}
],
}
}
class VersionsTestV20(test.NoDBTestCase):
def test_get_version_list(self):
req = webob.Request.blank('/')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
versions = jsonutils.loads(res.body)["versions"]
expected = [
{
"id": "v2.0",
"status": "SUPPORTED",
"version": "",
"min_version": "",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/",
}],
},
{
"id": "v2.1",
"status": "CURRENT",
"version": "2.5",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2.1/",
}],
},
]
self.assertEqual(versions, expected)
def test_get_version_list_302(self):
req = webob.Request.blank('/v2')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 302)
redirect_req = webob.Request.blank('/v2/')
self.assertEqual(res.location, redirect_req.url)
def _test_get_version_2_detail(self, url, accept=None):
if accept is None:
accept = "application/json"
req = webob.Request.blank(url)
req.accept = accept
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {
"version": {
"id": "v2.0",
"status": "SUPPORTED",
"version": "",
"min_version": "",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/",
},
{
"rel": "describedby",
"type": "text/html",
"href": EXP_LINKS['v2.0']['html'],
},
],
"media-types": [
{
"base": "application/json",
"type": "application/"
"vnd.openstack.compute+json;version=2",
},
],
},
}
self.assertEqual(expected, version)
def test_get_version_2_detail(self):
self._test_get_version_2_detail('/v2/')
def test_get_version_2_detail_content_type(self):
accept = "application/json;version=2"
self._test_get_version_2_detail('/', accept=accept)
def test_get_version_2_versions_invalid(self):
req = webob.Request.blank('/v2/versions/1234')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(404, res.status_int)
def test_multi_choice_image(self):
req = webob.Request.blank('/images/1')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 300)
self.assertEqual(res.content_type, "application/json")
expected = {
"choices": [
{
"id": "v2.0",
"status": "SUPPORTED",
"links": [
{
"href": "http://localhost/v2/images/1",
"rel": "self",
},
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json"
";version=2"
},
],
},
{
"id": "v2.1",
"status": "CURRENT",
"links": [
{
"href": "http://localhost/v2.1/images/1",
"rel": "self",
},
],
"media-types": [
{
"base": "application/json",
"type":
"application/vnd.openstack.compute+json;version=2.1",
}
],
},
], }
self.assertThat(jsonutils.loads(res.body),
matchers.DictMatches(expected))
def test_multi_choice_server_atom(self):
"""Make sure multi choice responses do not have content-type
application/atom+xml (should use default of json)
"""
req = webob.Request.blank('/servers')
req.accept = "application/atom+xml"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 300)
self.assertEqual(res.content_type, "application/json")
def test_multi_choice_server(self):
uuid = str(stdlib_uuid.uuid4())
req = webob.Request.blank('/servers/' + uuid)
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 300)
self.assertEqual(res.content_type, "application/json")
expected = {
"choices": [
{
"id": "v2.0",
"status": "SUPPORTED",
"links": [
{
"href": "http://localhost/v2/servers/" + uuid,
"rel": "self",
},
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json"
";version=2"
},
],
},
{
"id": "v2.1",
"status": "CURRENT",
"links": [
{
"href": "http://localhost/v2.1/servers/" + uuid,
"rel": "self",
},
],
"media-types": [
{
"base": "application/json",
"type":
"application/vnd.openstack.compute+json;version=2.1",
}
],
},
], }
self.assertThat(jsonutils.loads(res.body),
matchers.DictMatches(expected))
class VersionsViewBuilderTests(test.NoDBTestCase):
def test_view_builder(self):
base_url = "http://example.org/"
version_data = {
"v3.2.1": {
"id": "3.2.1",
"status": "CURRENT",
"version": "2.3",
"min_version": "2.1",
"updated": "2011-07-18T11:30:00Z",
}
}
expected = {
"versions": [
{
"id": "3.2.1",
"status": "CURRENT",
"version": "2.3",
"min_version": "2.1",
"updated": "2011-07-18T11:30:00Z",
"links": [
{
"rel": "self",
"href": "http://example.org/v2/",
},
],
}
]
}
builder = views.versions.ViewBuilder(base_url)
output = builder.build_versions(version_data)
self.assertEqual(output, expected)
def test_generate_href(self):
base_url = "http://example.org/app/"
expected = "http://example.org/app/v2/"
builder = views.versions.ViewBuilder(base_url)
actual = builder.generate_href('v2')
self.assertEqual(actual, expected)
def test_generate_href_v21(self):
base_url = "http://example.org/app/"
expected = "http://example.org/app/v2.1/"
builder = views.versions.ViewBuilder(base_url)
actual = builder.generate_href('v2.1')
self.assertEqual(actual, expected)
def test_generate_href_unknown(self):
base_url = "http://example.org/app/"
expected = "http://example.org/app/v2/"
builder = views.versions.ViewBuilder(base_url)
actual = builder.generate_href('foo')
self.assertEqual(actual, expected)
# NOTE(oomichi): The v2.0 version API now covers "/" (root),
# so this class tests only "/v2.1" for the v2.1 API.
class VersionsTestV21(test.NoDBTestCase):
exp_versions = copy.deepcopy(EXP_VERSIONS)
exp_versions['v2.0']['links'].insert(0,
{'href': 'http://localhost/v2.1/', 'rel': 'self'},
)
def test_get_version_list_302(self):
req = webob.Request.blank('/v2.1')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(res.status_int, 302)
redirect_req = webob.Request.blank('/v2.1/')
self.assertEqual(res.location, redirect_req.url)
def test_get_version_21_detail(self):
req = webob.Request.blank('/v2.1/')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {"version": self.exp_versions['v2.1']}
self.assertEqual(expected, version)
def test_get_version_21_versions_v21_detail(self):
req = webob.Request.blank('/v2.1/fake/versions/v2.1')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {"version": self.exp_versions['v2.1']}
self.assertEqual(expected, version)
def test_get_version_21_versions_v20_detail(self):
req = webob.Request.blank('/v2.1/fake/versions/v2.0')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {"version": self.exp_versions['v2.0']}
self.assertEqual(expected, version)
def test_get_version_21_versions_invalid(self):
req = webob.Request.blank('/v2.1/versions/1234')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(res.status_int, 404)
def test_get_version_21_detail_content_type(self):
req = webob.Request.blank('/')
req.accept = "application/json;version=2.1"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {"version": self.exp_versions['v2.1']}
self.assertEqual(expected, version)
|
|
from datetime import datetime, timedelta
from django.core import mail
from django.utils import timezone
from djblets.util.dates import get_tz_aware_utcnow
from djblets.testing.decorators import add_fixtures
from djblets.webapi.errors import (DOES_NOT_EXIST, INVALID_FORM_DATA,
PERMISSION_DENIED)
from djblets.webapi.testing.decorators import webapi_test_template
from kgb import SpyAgency, spy_on
from reviewboard.reviews.models import Review, ReviewRequest
from reviewboard.reviews.signals import review_ship_it_revoking
from reviewboard.webapi.errors import REVOKE_SHIP_IT_ERROR
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (review_list_mimetype,
review_item_mimetype)
from reviewboard.webapi.tests.mixins import (BasicTestsMetaclass,
ReviewRequestChildItemMixin,
ReviewRequestChildListMixin)
from reviewboard.webapi.tests.mixins_review import (ReviewItemMixin,
ReviewListMixin)
from reviewboard.webapi.tests.urls import (get_review_item_url,
get_review_list_url)
class ResourceListTests(ReviewListMixin, ReviewRequestChildListMixin,
BaseWebAPITestCase, metaclass=BasicTestsMetaclass):
"""Testing the ReviewResource list APIs."""
fixtures = ['test_users']
sample_api_url = 'review-requests/<id>/reviews/'
resource = resources.review
def setup_review_request_child_test(self, review_request):
return (get_review_list_url(review_request),
review_list_mimetype)
def compare_item(self, item_rsp, review):
self.assertEqual(item_rsp['id'], review.pk)
self.assertEqual(item_rsp['ship_it'], review.ship_it)
self.assertEqual(item_rsp['body_top'], review.body_top)
self.assertEqual(item_rsp['body_bottom'], review.body_bottom)
if review.body_top_rich_text:
self.assertEqual(item_rsp['body_top_text_type'], 'markdown')
else:
self.assertEqual(item_rsp['body_top_text_type'], 'plain')
if review.body_bottom_rich_text:
self.assertEqual(item_rsp['body_bottom_text_type'], 'markdown')
else:
self.assertEqual(item_rsp['body_bottom_text_type'], 'plain')
self.assertEqual(item_rsp['absolute_url'],
self.base_url + review.get_absolute_url())
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name,
populate_items):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
if populate_items:
items = [self.create_review(review_request, publish=True)]
else:
items = []
return (get_review_list_url(review_request, local_site_name),
review_list_mimetype,
items)
def test_get_with_counts_only(self):
"""Testing the GET review-requests/<id>/reviews/?counts-only=1 API"""
review_request = self.create_review_request(publish=True)
self.create_review(review_request, publish=True)
self.create_review(review_request, publish=True)
rsp = self.api_get(get_review_list_url(review_request), {
'counts-only': 1,
}, expected_mimetype=review_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['count'], 2)
def test_get_with_invite_only_group_and_permission_denied_error(self):
"""Testing the GET review-requests/<id>/reviews/ API
with invite-only group and Permission Denied error
"""
review_request = self.create_review_request(publish=True)
self.assertNotEqual(review_request.submitter, self.user)
group = self.create_review_group(invite_only=True)
review_request.target_groups.add(group)
review_request.save()
rsp = self.api_get(get_review_list_url(review_request),
expected_status=403)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], PERMISSION_DENIED.code)
#
# HTTP POST tests
#
def setup_basic_post_test(self, user, with_local_site, local_site_name,
post_valid_data):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
return (
get_review_list_url(review_request, local_site_name),
review_item_mimetype,
{
'ship_it': True,
'body_top': 'My body top',
'body_bottom': 'My body bottom',
},
[review_request])
def check_post_result(self, user, rsp, review_request):
review = Review.objects.get(pk=rsp['review']['id'])
self.assertFalse(review.rich_text)
self.compare_item(rsp['review'], review)
class ResourceItemTests(SpyAgency, ReviewItemMixin,
ReviewRequestChildItemMixin, BaseWebAPITestCase,
metaclass=BasicTestsMetaclass):
"""Testing the ReviewResource item APIs."""
fixtures = ['test_users']
sample_api_url = 'review-requests/<id>/reviews/<id>/'
resource = resources.review
def setup_review_request_child_test(self, review_request):
review = self.create_review(review_request, publish=True)
return (get_review_item_url(review_request, review.pk),
review_item_mimetype)
def compare_item(self, item_rsp, review):
self.assertEqual(item_rsp['id'], review.pk)
self.assertEqual(item_rsp['ship_it'], review.ship_it)
self.assertEqual(item_rsp['body_top'], review.body_top)
self.assertEqual(item_rsp['body_bottom'], review.body_bottom)
if review.body_top_rich_text:
self.assertEqual(item_rsp['body_top_text_type'], 'markdown')
else:
self.assertEqual(item_rsp['body_top_text_type'], 'plain')
if review.body_bottom_rich_text:
self.assertEqual(item_rsp['body_bottom_text_type'], 'markdown')
else:
self.assertEqual(item_rsp['body_bottom_text_type'], 'plain')
#
# HTTP DELETE tests
#
def setup_basic_delete_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, user=user)
return (get_review_item_url(review_request, review.pk,
local_site_name),
[review, review_request])
def check_delete_result(self, user, review, review_request):
self.assertNotIn(review, review_request.reviews.all())
def test_delete_with_published_review(self):
"""Testing the DELETE review-requests/<id>/reviews/<id>/ API
with pre-published review
"""
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, user=self.user,
publish=True)
self.api_delete(get_review_item_url(review_request, review.id),
expected_status=403)
self.assertEqual(review_request.reviews.count(), 1)
def test_delete_with_does_not_exist(self):
"""Testing the DELETE review-requests/<id>/reviews/<id>/ API
with Does Not Exist error
"""
review_request = self.create_review_request(publish=True)
rsp = self.api_delete(get_review_item_url(review_request, 919239),
expected_status=404)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], DOES_NOT_EXIST.code)
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, user=user)
return (get_review_item_url(review_request, review.pk,
local_site_name),
review_item_mimetype,
review)
def test_get_not_modified(self):
"""Testing the GET review-requests/<id>/reviews/<id>/ API
with Not Modified response
"""
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, publish=True)
self._testHttpCaching(
get_review_item_url(review_request, review.pk),
check_etags=True)
#
# HTTP PUT tests
#
def setup_basic_put_test(self, user, with_local_site, local_site_name,
put_valid_data):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, user=user)
return (get_review_item_url(review_request, review.pk,
local_site_name),
review_item_mimetype,
{'body_top': 'New body top'},
review,
[])
def check_put_result(self, user, item_rsp, review, *args):
self.assertEqual(item_rsp['id'], review.pk)
self.assertEqual(item_rsp['body_top'], 'New body top')
self.assertEqual(item_rsp['body_top_text_type'], 'plain')
self.assertEqual(item_rsp['body_bottom_text_type'], 'plain')
review = Review.objects.get(pk=review.pk)
self.compare_item(item_rsp, review)
def test_put_with_published_review(self):
"""Testing the PUT review-requests/<id>/reviews/<id>/ API
with pre-published review
"""
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, user=self.user,
publish=True)
self.api_put(
get_review_item_url(review_request, review.id),
{'body_top': 'foo'},
expected_status=403)
@webapi_test_template
def test_put_with_public_and_ship_it_true(self):
"""Testing the PUT <URL> API with pre-published review and
ship_it=true
"""
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request,
user=self.user,
publish=True)
rsp = self.api_put(
get_review_item_url(review_request, review.pk),
{'ship_it': True},
expected_status=400)
self.assertEqual(rsp['err']['code'], INVALID_FORM_DATA.code)
self.assertEqual(rsp['fields'], {
'ship_it': 'Published reviews cannot be updated with ship_it=true',
})
@webapi_test_template
def test_put_with_revoke_ship_it(self):
"""Testing the PUT <URL> API with revoking Ship It
"""
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request,
user=self.user,
body_top=Review.SHIP_IT_TEXT,
ship_it=True,
publish=True)
rsp = self.api_put(
get_review_item_url(review_request, review.pk),
{'ship_it': False},
expected_mimetype=review_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['review']['body_top'],
Review.REVOKED_SHIP_IT_TEXT)
self.assertFalse(rsp['review']['ship_it'])
self.assertTrue(rsp['review']['extra_data'].get('revoked_ship_it'))
@webapi_test_template
def test_put_with_revoke_ship_it_and_no_permission(self):
"""Testing the PUT <URL> API with revoking Ship It and no permission"""
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request,
ship_it=True,
publish=True)
self.assertNotEqual(review.user, self.user)
rsp = self.api_put(
get_review_item_url(review_request, review.pk),
{'ship_it': False},
expected_status=403)
self.assertEqual(rsp['err']['code'], PERMISSION_DENIED.code)
@webapi_test_template
def test_put_with_revoke_ship_it_and_not_ship_it(self):
"""Testing the PUT <URL> API with revoking Ship It on a review not
marked Ship It
"""
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request,
user=self.user,
publish=True)
rsp = self.api_put(
get_review_item_url(review_request, review.pk),
{'ship_it': False},
expected_status=400)
self.assertEqual(rsp['err']['code'], INVALID_FORM_DATA.code)
self.assertEqual(rsp['fields'], {
'ship_it': 'This review is not marked Ship It!',
})
@webapi_test_template
def test_put_with_revoke_ship_it_and_revoke_error(self):
"""Testing the PUT <URL> API with revoking Ship It and handling a
revocation error
"""
def on_revoking(**kwargs):
raise Exception('oh no')
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request,
user=self.user,
ship_it=True,
publish=True)
try:
review_ship_it_revoking.connect(on_revoking)
rsp = self.api_put(
get_review_item_url(review_request, review.pk),
{'ship_it': False},
expected_status=500)
finally:
review_ship_it_revoking.disconnect(on_revoking)
self.assertEqual(rsp['err']['code'], REVOKE_SHIP_IT_ERROR.code)
self.assertEqual(rsp['err']['msg'],
'Error revoking the Ship It: oh no')
@webapi_test_template
def test_put_revoke_ship_it_timestamp(self):
"""Testing the PUT <URL> API with revoking Ship It does not update
timestamp
"""
# ReviewRequest.last_update is a
# django.db.fields.ModificationTimestampField, which retrieves its
# value from datetime.utcnow().replace(tzinfo=utc).
#
# django.utils.timezone.now has the same implementation.
#
# Unfortunately, we cannot spy on datetime.utcnow since it is a
# builtin. So we replace get_tz_aware_utcnow with timezone.now and we
# will replace that with a constant function in the spy_on calls below.
self.spy_on(get_tz_aware_utcnow, call_fake=lambda: timezone.now())
creation_timestamp = datetime.fromtimestamp(0, timezone.utc)
review_timestamp = creation_timestamp + timedelta(hours=1)
revoke_timestamp = review_timestamp + timedelta(hours=1)
with spy_on(timezone.now, call_fake=lambda: creation_timestamp):
review_request = self.create_review_request(publish=True,
submitter=self.user)
with spy_on(timezone.now, call_fake=lambda: review_timestamp):
review = self.create_review(review_request,
body_top=Review.SHIP_IT_TEXT,
ship_it=True,
publish=True,
user=self.user)
review_request = ReviewRequest.objects.get(pk=review_request.pk)
self.assertEqual(review_request.time_added, creation_timestamp)
self.assertEqual(review_request.last_updated, review_timestamp)
self.assertEqual(review.timestamp, review_timestamp)
with spy_on(timezone.now, call_fake=lambda: revoke_timestamp):
rsp = self.api_put(
get_review_item_url(review_request, review.pk),
{'ship_it': False},
expected_mimetype=review_item_mimetype,
)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
review = Review.objects.get(pk=review.pk)
review_request = ReviewRequest.objects.get(pk=review_request.pk)
self.assertEqual(review_request.time_added, creation_timestamp)
self.assertEqual(review_request.last_updated, review_timestamp)
self.assertEqual(review.timestamp, review_timestamp)
@add_fixtures(['test_site'])
def test_put_publish(self):
"""Testing the PUT review-requests/<id>/reviews/<id>/?public=1 API"""
body_top = "My Body Top"
body_bottom = ""
ship_it = True
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, user=self.user)
with self.siteconfig_settings({'mail_send_review_mail': True},
reload_settings=False):
self.api_put(
get_review_item_url(review_request, review.pk),
{
'public': True,
'ship_it': ship_it,
'body_top': body_top,
'body_bottom': body_bottom,
},
expected_mimetype=review_item_mimetype)
reviews = review_request.reviews.filter(user=self.user)
self.assertEqual(len(reviews), 1)
review = reviews[0]
self.assertEqual(review.ship_it, ship_it)
self.assertEqual(review.body_top, body_top)
self.assertEqual(review.body_bottom, body_bottom)
self.assertEqual(review.public, True)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject,
'Re: Review Request %s: %s'
% (review_request.display_id, review_request.summary))
self.assertValidRecipients([
review_request.submitter.username,
self.user.username,
])
|
|
#!/usr/bin/env python
import time
from tables import *
class Small(IsDescription):
var1 = StringCol(itemsize=4)
var2 = Int32Col()
var3 = Float64Col()
var4 = BoolCol()
# Define a user record to characterize some kind of particles
class Medium(IsDescription):
var1 = StringCol(itemsize=16) # 16-character String
#float1 = Float64Col(dflt=2.3)
#float2 = Float64Col(dflt=2.3)
#zADCcount = Int16Col() # signed short integer
var2 = Int32Col() # signed short integer
var3 = Float64Col()
grid_i = Int32Col() # integer
grid_j = Int32Col() # integer
pressure = Float32Col() # float (single-precision)
energy = Float64Col(shape=2) # double (double-precision)
def createFile(filename, nrows, filters, atom, recsize, index, verbose):
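    """Create the benchmark table with `nrows` rows, optionally indexing the
    column selected by `atom`, and return (rows written, row size)."""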
# Open a file in "w"rite mode
fileh = openFile(filename, mode = "w", title="Searchsorted Benchmark",
filters=filters)
title = "This is the IndexArray title"
# Create an IndexArray instance
rowswritten = 0
# Create an entry
klass = {"small":Small, "medium":Medium}
table = fileh.createTable(fileh.root, 'table', klass[recsize], title,
None, nrows)
for i in xrange(nrows):
#table.row['var1'] = str(i)
#table.row['var2'] = random.randrange(nrows)
table.row['var2'] = i
table.row['var3'] = i
#table.row['var4'] = i % 2
#table.row['var4'] = i > 2
table.row.append()
rowswritten += nrows
table.flush()
rowsize = table.rowsize
indexrows = 0
# Index one entry:
if index:
if atom == "string":
indexrows = table.cols.var1.createIndex()
elif atom == "bool":
indexrows = table.cols.var4.createIndex()
elif atom == "int":
indexrows = table.cols.var2.createIndex()
elif atom == "float":
indexrows = table.cols.var3.createIndex()
else:
raise ValueError("Index type not supported yet")
if verbose:
print "Number of indexed rows:", indexrows
# Close the file (eventually destroy the extended type)
fileh.close()
return (rowswritten, rowsize)
def readFile(filename, atom, niter, verbose):
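    """Run `niter` selection queries against the column selected by `atom`
    and return (rows read, rows selected, row size)."""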
# Open the HDF5 file in read-only mode
fileh = openFile(filename, mode = "r")
table = fileh.root.table
print "reading", table
if atom == "string":
idxcol = table.cols.var1.index
elif atom == "bool":
idxcol = table.cols.var4.index
elif atom == "int":
idxcol = table.cols.var2.index
else:
idxcol = table.cols.var3.index
if verbose:
print "Max rows in buf:", table.nrowsinbuf
print "Rows in", table._v_pathname, ":", table.nrows
print "Buffersize:", table.rowsize * table.nrowsinbuf
print "MaxTuples:", table.nrowsinbuf
print "Chunk size:", idxcol.sorted.chunksize
print "Number of elements per slice:", idxcol.nelemslice
print "Slice number in", table._v_pathname, ":", idxcol.nrows
rowselected = 0
if atom == "string":
for i in xrange(niter):
#results = [table.row["var3"] for i in table.where(2+i<=table.cols.var2 < 10+i)]
# results = [table.row.nrow() for i in table.where(2<=table.cols.var2 < 10)]
results = [p["var1"] #p.nrow()
for p in table.where(table.cols.var1 == "1111")]
# for p in table.where("1000"<=table.cols.var1<="1010")]
rowselected += len(results)
elif atom == "bool":
for i in xrange(niter):
results = [p["var2"] #p.nrow()
for p in table.where(table.cols.var4==0)]
rowselected += len(results)
elif atom == "int":
for i in xrange(niter):
#results = [table.row["var3"] for i in table.where(2+i<=table.cols.var2 < 10+i)]
# results = [table.row.nrow() for i in table.where(2<=table.cols.var2 < 10)]
results = [p["var2"] #p.nrow()
# for p in table.where(110*i<=table.cols.var2<110*(i+1))]
# for p in table.where(1000-30<table.cols.var2<1000+60)]
for p in table.where(table.cols.var2<=400)]
rowselected += len(results)
elif atom == "float":
for i in xrange(niter):
# results = [(table.row.nrow(), table.row["var3"])
# for i in table.where(3<=table.cols.var3 < 5.)]
# results = [(p.nrow(), p["var3"])
# for p in table.where(1000.-i<=table.cols.var3<1000.+i)]
results = [p["var3"] # (p.nrow(), p["var3"])
for p in table.where(100*i<=table.cols.var3<100*(i+1))]
# for p in table
# if 100*i<=p["var3"]<100*(i+1)]
# results = [ (p.nrow(), p["var3"]) for p in table
# if (1000.-i <= p["var3"] < 1000.+i) ]
rowselected += len(results)
else:
raise ValueError("Unsuported atom value")
if verbose and 1:
print "Values that fullfill the conditions:"
print results
rowsread = table.nrows * niter
rowsize = table.rowsize
# Close the file (eventually destroy the extended type)
fileh.close()
return (rowsread, rowselected, rowsize)
def searchFile(filename, atom, verbose, item):
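    """Search for `item` through the column index and return
    (rows read, uncompressed bytes, search iterations)."""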
# Open the HDF5 file in read-only mode
fileh = openFile(filename, mode = "r")
rowsread = 0
uncomprBytes = 0
table = fileh.root.table
if atom == "int":
idxcol = table.cols.var2.index
elif atom == "float":
idxcol = table.cols.var3.index
else:
raise ValueError("Unsuported atom value")
print "Searching", table, "..."
if verbose:
print "Chunk size:", idxcol.sorted.chunksize
print "Number of elements per slice:", idxcol.sorted.nelemslice
print "Slice number in", table._v_pathname, ":", idxcol.sorted.nrows
(positions, niter) = idxcol.search(item)
if verbose:
print "Positions for item", item, "==>", positions
print "Total iterations in search:", niter
rowsread += table.nrows
uncomprBytes += idxcol.sorted.chunksize * niter * idxcol.sorted.itemsize
results = table.read(coords=positions)
print "results length:", len(results)
if verbose:
print "Values that fullfill the conditions:"
print results
# Close the file (eventually destroy the extended type)
fileh.close()
return (rowsread, uncomprBytes, niter)
if __name__=="__main__":
import sys
import getopt
try:
import psyco
psyco_imported = 1
    except ImportError:
psyco_imported = 0
usage = """usage: %s [-v] [-p] [-R range] [-r] [-w] [-s recsize ] [-a
atom] [-c level] [-l complib] [-S] [-F] [-i item] [-n nrows] [-x]
[-k niter] file
-v verbose
-p use "psyco" if available
-R select a range in a field in the form "start,stop,step"
-r only read test
-w only write test
-s record size
-a use [float], [int], [bool] or [string] atom
-c sets a compression level (do not set it or 0 for no compression)
-S activate shuffling filter
-F activate fletcher32 filter
-l sets the compression library to be used ("zlib", "lzo", "ucl", "bzip2")
-i item to search
-n set the number of rows in tables
-x don't make indexes
-k number of iterations for reading\n""" % sys.argv[0]
try:
opts, pargs = getopt.getopt(sys.argv[1:], 'vpSFR:rwxk:s:a:c:l:i:n:')
    except getopt.GetoptError:
sys.stderr.write(usage)
sys.exit(0)
    # if we pass too many parameters, abort
if len(pargs) != 1:
sys.stderr.write(usage)
sys.exit(0)
# default options
verbose = 0
rng = None
item = None
atom = "int"
fieldName = None
testread = 1
testwrite = 1
usepsyco = 0
complevel = 0
shuffle = 0
fletcher32 = 0
complib = "zlib"
nrows = 100
recsize = "small"
index = 1
niter = 1
# Get the options
for option in opts:
if option[0] == '-v':
verbose = 1
if option[0] == '-p':
usepsyco = 1
if option[0] == '-S':
shuffle = 1
if option[0] == '-F':
fletcher32 = 1
elif option[0] == '-R':
rng = [int(i) for i in option[1].split(",")]
elif option[0] == '-r':
testwrite = 0
elif option[0] == '-w':
testread = 0
elif option[0] == '-x':
index = 0
elif option[0] == '-s':
recsize = option[1]
elif option[0] == '-a':
atom = option[1]
if atom not in ["float", "int", "bool", "string"]:
sys.stderr.write(usage)
sys.exit(0)
elif option[0] == '-c':
complevel = int(option[1])
elif option[0] == '-l':
complib = option[1]
elif option[0] == '-i':
item = eval(option[1])
elif option[0] == '-n':
nrows = int(option[1])
elif option[0] == '-k':
niter = int(option[1])
# Build the Filters instance
filters = Filters(complevel=complevel, complib=complib,
shuffle=shuffle, fletcher32=fletcher32)
# Catch the hdf5 file passed as the last argument
file = pargs[0]
if testwrite:
print "Compression level:", complevel
if complevel > 0:
print "Compression library:", complib
if shuffle:
print "Suffling..."
t1 = time.time()
cpu1 = time.clock()
if psyco_imported and usepsyco:
psyco.bind(createFile)
(rowsw, rowsz) = createFile(file, nrows, filters,
atom, recsize, index, verbose)
t2 = time.time()
cpu2 = time.clock()
tapprows = round(t2-t1, 3)
cpuapprows = round(cpu2-cpu1, 3)
tpercent = int(round(cpuapprows/tapprows, 2)*100)
print "Rows written:", rowsw, " Row size:", rowsz
print "Time writing rows: %s s (real) %s s (cpu) %s%%" % \
(tapprows, cpuapprows, tpercent)
print "Write rows/sec: ", int(rowsw / float(tapprows))
print "Write KB/s :", int(rowsw * rowsz / (tapprows * 1024))
if testread:
if psyco_imported and usepsyco:
psyco.bind(readFile)
psyco.bind(searchFile)
t1 = time.time()
cpu1 = time.clock()
if rng or item:
(rowsr, uncomprB, niter) = searchFile(file, atom, verbose, item)
else:
for i in range(1):
(rowsr, rowsel, rowsz) = readFile(file, atom, niter, verbose)
t2 = time.time()
cpu2 = time.clock()
treadrows = round(t2-t1, 3)
cpureadrows = round(cpu2-cpu1, 3)
tpercent = int(round(cpureadrows/treadrows, 2)*100)
tMrows = rowsr/(1000*1000.)
sKrows = rowsel/1000.
print "Rows read:", rowsr, "Mread:", round(tMrows, 3), "Mrows"
print "Rows selected:", rowsel, "Ksel:", round(sKrows, 3), "Krows"
print "Time reading rows: %s s (real) %s s (cpu) %s%%" % \
(treadrows, cpureadrows, tpercent)
print "Read Mrows/sec: ", round(tMrows / float(treadrows), 3)
#print "Read KB/s :", int(rowsr * rowsz / (treadrows * 1024))
# print "Uncompr MB :", int(uncomprB / (1024 * 1024))
# print "Uncompr MB/s :", int(uncomprB / (treadrows * 1024 * 1024))
# print "Total chunks uncompr :", int(niter)
|
|
#!/usr/bin/env python3
# This file is part of the Soletta Project
#
# Copyright (C) 2015 Intel Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import sys
import traceback
from collections import OrderedDict
def merge_schema(directory, definitions, to_merge):
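    """Resolve every $ref entry in an allOf list and merge the referenced
    definitions into `definitions`."""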
for schema in to_merge:
        if '$ref' not in schema:
raise ValueError("no $ref in allOf")
path, link = schema['$ref'].split('#')
ref = load_json_schema(directory, path)
defnref = link.split('/')[-1]
definitions.update(ref[defnref])
def load_json_schema(directory, path, schemas={}):
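    """Load a JSON schema file and return its definitions block, with allOf
    entries merged and each property annotated with `read_only` and `required`
    flags. Results are memoized in the `schemas` dictionary."""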
if path in schemas:
return schemas[path]
    with open(os.path.join(directory, path), "r", encoding='UTF-8') as schema_file:
        data = json.load(schema_file)
if not data['$schema'].startswith("http://json-schema.org/"):
raise ValueError("not a JSON schema")
definitions = data.get("definitions", {})
if not definitions:
raise ValueError("empty definition block")
    if 'title' not in data:
raise ValueError("JSON schema without title")
required = set(data.get('required', []))
for rt, descr in definitions.items():
if 'allOf' in descr:
merge_schema(directory, descr, descr['allOf'])
del descr['allOf']
if 'properties' in descr:
for field, props in descr['properties'].items():
doc = props.get('description', '')
props['read_only'] = doc.startswith('ReadOnly,')
props['required'] = field in required
if props['read_only']:
props['description'] = props['description'][len('ReadOnly,'):].strip()
descr['title'] = data['title']
schemas[path] = definitions
return definitions
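# Illustrative sketch (hypothetical schema file and field names) of the shape
# load_json_schema() returns: a dict keyed by resource type, each entry carrying
# the schema title plus its post-processed properties, e.g.
#
#   defs = load_json_schema("schemas", "oic.r.switch.binary.json")
#   # defs == {"oic.r.switch.binary": {
#   #     "type": "object",
#   #     "title": "Binary Switch",
#   #     "properties": {"value": {"type": "boolean", "read_only": False,
#   #                              "required": True, "description": "..."}}}}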
JSON_TO_C = {
"string": "char *",
"integer": "int32_t",
"boolean": "bool",
"number": "double"
}
JSON_TO_C_TMP = {}
JSON_TO_C_TMP.update(JSON_TO_C)
JSON_TO_C_TMP['number'] = "double"
JSON_TO_FLOW_GET_PKT = {
"string": "sol_flow_packet_get_string",
"integer": "sol_flow_packet_get_irange_value",
"boolean": "sol_flow_packet_get_boolean",
"number": "sol_flow_packet_get_drange_value"
}
JSON_TO_FLOW_CHECK_UPDATED = {
"string": "check_updated_string",
"integer": "check_updated_int32",
"boolean": "check_updated_boolean",
"number": "check_updated_number"
}
JSON_TO_FLOW_SEND_PKT = {
"string": "send_string_packet",
"integer": "sol_flow_send_irange_value_packet",
"boolean": "sol_flow_send_boolean_packet",
"number": "sol_flow_send_drange_value_packet"
}
JSON_TO_INIT = {
"string": "NULL",
"integer": "0",
"boolean": "false",
"number": "0.0f"
}
JSON_TO_SOL_JSON = {
"string": "string",
"integer": "int",
"boolean": "boolean",
"number": "float"
}
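# Worked example of how one JSON property type threads through the tables above:
# an "integer" property becomes an int32_t struct field (JSON_TO_C), is read with
# sol_flow_packet_get_irange_value (JSON_TO_FLOW_GET_PKT), change-checked with
# check_updated_int32 (JSON_TO_FLOW_CHECK_UPDATED), sent with
# sol_flow_send_irange_value_packet (JSON_TO_FLOW_SEND_PKT), initialized to 0
# (JSON_TO_INIT) and exposed on flow ports as "int" (JSON_TO_SOL_JSON).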
def props_are_equivalent(p1, p2):
    # Ignore descriptions; only the property types/enums matter for equivalence
p1 = {k: get_type_from_property(v) for k, v in p1.items()}
p2 = {k: get_type_from_property(v) for k, v in p2.items()}
return p1 == p2
def object_fields_common_c(state_struct_name, name, props):
fields = []
for prop_name, descr in props.items():
doc = '/* %s */' % descr.get('description', '???')
if 'enum' in descr:
var_type = 'enum %s_%s' % (state_struct_name, prop_name)
else:
var_type = JSON_TO_C[descr['type']]
fields.append("%s %s; %s" % (var_type, prop_name, doc))
return '\n'.join(fields)
def generate_object_to_repr_vec_fn_common_c(state_struct_name, name, props, client):
fields = []
for prop_name, prop_descr in props.items():
if client and prop_descr['read_only']:
continue
if 'enum' in prop_descr:
tbl = '%s_%s_tbl' % (state_struct_name, prop_name)
val = '%s[state->state.%s].key' % (tbl, prop_name)
            vallen = '%s[state->state.%s].len' % (tbl, prop_name)
ftype = 'SOL_OIC_REPR_TEXT_STRING'
fargs = (val, vallen)
elif prop_descr['type'] == 'boolean':
val = 'state->state.%s' % prop_name
ftype = 'SOL_OIC_REPR_BOOLEAN'
fargs = (val, )
elif prop_descr['type'] == 'string':
val = 'state->state.%s' % prop_name
vallen = '%s ? strlen(%s) : 0' % (val, val)
ftype = 'SOL_OIC_REPR_TEXT_STRING'
fargs = (val, vallen)
elif prop_descr['type'] == 'integer':
val = 'state->state.%s' % prop_name
ftype = 'SOL_OIC_REPR_INT'
fargs = (val, )
elif prop_descr['type'] == 'number':
val = 'state->state.%s' % prop_name
ftype = 'SOL_OIC_REPR_DOUBLE'
fargs = (val, )
else:
            raise ValueError('unknown field type: %s' % prop_descr['type'])
vars = {
'ftype': ftype,
'key': prop_name,
'fargs': ', '.join(fargs)
}
fields.append('''ret = sol_oic_map_append(repr_map, &%(ftype)s("%(key)s", %(fargs)s));
SOL_EXP_CHECK(!ret, false);
''' % vars)
if not fields:
return ''
return '''static bool
%(struct_name)s_to_repr_vec(void *data, struct sol_oic_map_writer *repr_map)
{
struct %(struct_name)s *state = (struct %(struct_name)s *)data;
bool ret;
%(fields)s
return true;
}
''' % {
'type': 'client' if client else 'server',
'struct_name': name,
'fields': '\n'.join(fields)
}
def get_type_from_property(prop):
if 'type' in prop:
return prop['type']
if 'enum' in prop:
return 'enum:%s' % ','.join(prop['enum'])
raise ValueError('Unknown type for property')
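# e.g. get_type_from_property({'type': 'boolean'}) -> 'boolean'
#      get_type_from_property({'enum': ['on', 'off']}) -> 'enum:on,off'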
def object_to_repr_vec_fn_common_c(state_struct_name, name, props, client, equivalent={}):
for item_name, item_props in equivalent.items():
if item_props[0] == client and props_are_equivalent(props, item_props[1]):
return '''static bool
%(struct_name)s_to_repr_vec(void *data, struct sol_oic_map_writer *repr_map_encoder)
{
return %(item_name)s_to_repr_vec(data, repr_map_encoder); /* %(item_name)s is equivalent to %(struct_name)s */
}
''' % {
'item_name': item_name,
'struct_name': name,
'type': 'client' if client else 'server'
}
equivalent[name] = (client, props)
return generate_object_to_repr_vec_fn_common_c(state_struct_name, name, props, client)
def object_to_repr_vec_fn_client_c(state_struct_name, name, props):
return object_to_repr_vec_fn_common_c(state_struct_name, name, props, True)
def object_to_repr_vec_fn_server_c(state_struct_name, name, props):
return object_to_repr_vec_fn_common_c(state_struct_name, name, props, False)
def get_field_integer_client_c(id, name, prop):
return '''
if (decode_mask & (1<<%(id)d) && streq(field.key, "%(field_name)s")) {
if (field.type == SOL_OIC_REPR_TYPE_UINT)
fields.%(field_name)s = field.v_uint;
else if (field.type == SOL_OIC_REPR_TYPE_INT)
fields.%(field_name)s = field.v_int;
else if (field.type == SOL_OIC_REPR_TYPE_SIMPLE)
fields.%(field_name)s = field.v_simple;
else
RETURN_ERROR(-EINVAL);
decode_mask &= ~(1<<%(id)d);
continue;
}
''' % {
'field_name': name,
'field_name_len': len(name),
'id': id
}
def get_field_number_client_c(id, name, prop):
return '''
if (decode_mask & (1<<%(id)d) && streq(field.key, "%(field_name)s")) {
if (field.type == SOL_OIC_REPR_TYPE_DOUBLE)
fields.%(field_name)s = field.v_double;
else if (field.type == SOL_OIC_REPR_TYPE_FLOAT)
fields.%(field_name)s = field.v_float;
else
RETURN_ERROR(-EINVAL);
decode_mask &= ~(1<<%(id)d);
continue;
}
''' % {
'field_name': name,
'field_name_len': len(name),
'id': id
}
def get_field_string_client_c(id, name, prop):
return '''
if (decode_mask & (1<<%(id)d) && streq(field.key, "%(field_name)s")) {
if (field.type != SOL_OIC_REPR_TYPE_TEXT_STRING)
RETURN_ERROR(-EINVAL);
if (sol_util_replace_str_from_slice_if_changed(&fields.%(field_name)s, field.v_slice) < 0)
RETURN_ERROR(-EINVAL);
decode_mask &= ~(1<<%(id)d);
continue;
}
''' % {
'field_name': name,
'field_name_len': len(name),
'id': id
}
def get_field_boolean_client_c(id, name, prop):
return '''
if (decode_mask & (1<<%(id)d) && streq(field.key, "%(field_name)s")) {
if (field.type != SOL_OIC_REPR_TYPE_BOOLEAN)
RETURN_ERROR(-EINVAL);
fields.%(field_name)s = field.v_boolean;
decode_mask &= ~(1<<%(id)d);
continue;
}
''' % {
'field_name': name,
'field_name_len': len(name),
'id': id
}
def get_field_enum_client_c(id, struct_name, name, prop):
return '''
if (decode_mask & (1<<%(id)d) && streq(field.key, "%(field_name)s")) {
int val;
if (field.type != SOL_OIC_REPR_TYPE_TEXT_STRING)
RETURN_ERROR(-EINVAL);
val = sol_str_table_lookup_fallback(%(struct_name)s_%(field_name)s_tbl,
field.v_slice, -1);
if (val < 0)
RETURN_ERROR(-EINVAL);
fields.%(field_name)s = (enum %(struct_name)s_%(field_name)s)val;
decode_mask &= ~(1<<%(id)d);
continue;
}
''' % {
'struct_name': struct_name,
'field_name': name,
'field_name_len': len(name),
'id': id
}
def object_fields_from_repr_vec(name, props):
fields = []
type_to_fn = {
'string': get_field_string_client_c,
'integer': get_field_integer_client_c,
'number': get_field_number_client_c,
'boolean': get_field_boolean_client_c,
}
for id, (prop_name, prop) in enumerate(props.items()):
if 'enum' in prop:
fields.append(get_field_enum_client_c(id, name, prop_name, prop))
else:
fields.append(type_to_fn[prop['type']](id, prop_name, prop))
return '\n'.join(fields)
def generate_object_from_repr_vec_fn_common_c(name, props):
fields_init = []
for field_name, field_props in props.items():
if 'enum' in field_props:
fields_init.append(' .%(name)s = state->%(name)s,' % {"name": field_name})
elif field_props['type'] == 'string':
fields_init.append(' .%(name)s = state->%(name)s ? strdup(state->%(name)s) : NULL,' % {"name": field_name})
else:
fields_init.append(' .%(name)s = state->%(name)s,' % {"name": field_name})
fields_free = []
for field_name, field_props in props.items():
if 'enum' in field_props:
continue
if field_props.get('type') == 'string':
fields_free.append(' free(fields.%s);' % (field_name))
update_state = []
for field_name, field_props in props.items():
if not 'enum' in field_props:
if field_props.get('type') == 'string':
update_state.append("""\
if (check_updated_string(state->%(name)s, fields.%(name)s)) {
free(state->%(name)s);\
""" % {"name": field_name})
else:
update_state.append("""\
if (%(c_check_updated)s(state->%(name)s, fields.%(name)s)) {\
""" % {"name": field_name,
"c_check_updated": JSON_TO_FLOW_CHECK_UPDATED[field_props['type']]})
else:
update_state.append("""\
if (%(c_check_updated)s(state->%(name)s, fields.%(name)s)) {\
""" % {"name": field_name,
"c_check_updated": JSON_TO_FLOW_CHECK_UPDATED["integer"]})
update_state.append("""\
state->%(name)s = fields.%(name)s;
updated = true;
}
""" % {"name": field_name})
return '''static int
%(struct_name)s_from_repr_vec(struct %(struct_name)s *state,
const struct sol_oic_map_reader *repr_vec, uint32_t decode_mask)
{
struct sol_oic_repr_field field;
enum sol_oic_map_loop_reason end_reason;
struct sol_oic_map_reader iterator;
struct %(struct_name)s fields = {
%(fields_init)s
};
bool updated = false;
int ret = 0;
SOL_OIC_MAP_LOOP(repr_vec, &field, &iterator, end_reason) {
%(fields)s
}
if (end_reason != SOL_OIC_MAP_LOOP_OK)
goto out;
%(update_state)s
if (!updated)
goto out;
return 1;
out:
%(free_fields)s
return ret;
}
''' % {
'struct_name': name,
'fields_init': '\n'.join(fields_init),
'fields': object_fields_from_repr_vec(name, props),
'free_fields': '\n'.join(fields_free),
'update_state': '\n'.join(update_state)
}
def object_from_repr_vec_fn_common_c(name, props, equivalent={}):
for item_name, item_props in equivalent.items():
if props_are_equivalent(props, item_props):
return '''static int
%(struct_name)s_from_repr_vec(struct %(struct_name)s *state,
const struct sol_oic_map_reader *repr_map, uint32_t decode_mask)
{
/* %(item_name)s is equivalent to %(struct_name)s */
return %(item_name)s_from_repr_vec((struct %(item_name)s *)state, repr_map, decode_mask);
}
''' % {
'item_name': item_name,
'struct_name': name
}
equivalent[name] = props
return generate_object_from_repr_vec_fn_common_c(name, props)
def object_from_repr_vec_fn_client_c(state_struct_name, name, props):
return '''static int
%(struct_name)s_from_repr_vec(struct client_resource *resource, const struct sol_oic_map_reader *repr_vec)
{
struct %(struct_name)s *res = (struct %(struct_name)s *)resource;
return %(state_struct_name)s_from_repr_vec(&res->state, repr_vec, ~0);
}
''' % {
'struct_name': name,
'state_struct_name': state_struct_name
}
def object_from_repr_vec_fn_server_c(state_struct_name, name, props):
decode_mask = 0
id = 0
for field_name, field_props in props.items():
if not field_props['read_only']:
decode_mask |= 1<<id
id += 1
if not decode_mask:
return ''
return '''static int
%(struct_name)s_from_repr_vec(struct server_resource *resource, const struct sol_oic_map_reader *repr_vec)
{
struct %(struct_name)s *res = (struct %(struct_name)s *)resource;
return %(state_struct_name)s_from_repr_vec(&res->state, repr_vec, 0x%(decode_mask)x);
}
''' % {
'struct_name': name,
'state_struct_name': state_struct_name,
'decode_mask': decode_mask
}
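# Illustrative decode_mask example (hypothetical field names): if a definition's
# properties iterate as ("value": writable, "units": read_only, "range": writable),
# the writable fields occupy bits 0 and 2, so decode_mask == 0x5 and the generated
# server-side *_from_repr_vec() only decodes "value" and "range".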
def object_inform_flow_fn_common_c(state_struct_name, name, props, client):
send_flow_pkts = []
for field_name, field_props in props.items():
if 'enum' in field_props:
fn = 'sol_flow_send_string_packet'
val = '%(struct_name)s_%(field_name)s_tbl[state->state.%(field_name)s].key' % {
'struct_name': state_struct_name,
'field_name': field_name
}
else:
fn = JSON_TO_FLOW_SEND_PKT[field_props['type']]
val = 'state->state.%(field_name)s' % {
'field_name': field_name
}
send_flow_pkts.append('''%(flow_send_fn)s(resource->node, SOL_FLOW_NODE_TYPE_%(STRUCT_NAME)s__OUT__%(FIELD_NAME)s, %(val)s);''' % {
'flow_send_fn': fn,
'STRUCT_NAME': name.upper(),
'FIELD_NAME': field_name.upper(),
'val': val
})
return '''static void %(struct_name)s_inform_flow(struct %(type)s_resource *resource)
{
struct %(struct_name)s *state = (struct %(struct_name)s *)resource;
%(send_flow_pkts)s
}
''' % {
'type': 'client' if client else 'server',
'struct_name': name,
'send_flow_pkts': '\n'.join(send_flow_pkts)
}
def object_inform_flow_fn_client_c(state_struct_name, name, props):
return object_inform_flow_fn_common_c(state_struct_name, name, props, True)
def object_inform_flow_fn_server_c(state_struct_name, name, props):
read_only = all(field_props['read_only'] for field_name, field_props in props.items())
return '' if read_only else object_inform_flow_fn_common_c(state_struct_name, name, props, False)
def object_open_fn_client_c(state_struct_name, resource_type, name, props):
field_init = []
for field_name, field_props in props.items():
if 'enum' in field_props:
init = '(enum %s_%s)0' % (state_struct_name, field_name)
else:
init = JSON_TO_INIT[field_props.get('type', 'integer')]
field_init.append('''resource->state.%(field_name)s = %(init)s;''' % {
'field_name': field_name,
'init': init
})
no_inputs = all(field_props['read_only'] for field_name, field_props in props.items())
if no_inputs:
to_repr_vec_fn = 'NULL'
else:
to_repr_vec_fn = '%s_to_repr_vec' % name
return '''static int
%(struct_name)s_open(struct sol_flow_node *node, void *data, const struct sol_flow_node_options *options)
{
const struct sol_flow_node_type_%(struct_name)s_options *node_opts =
(const struct sol_flow_node_type_%(struct_name)s_options *)options;
static const struct client_resource_funcs funcs = {
.to_repr_vec = %(to_repr_vec_fn)s,
.from_repr_vec = %(struct_name)s_from_repr_vec,
.inform_flow = %(struct_name)s_inform_flow,
.found_port = SOL_FLOW_NODE_TYPE_%(STRUCT_NAME)s__OUT__FOUND,
.device_id_port = SOL_FLOW_NODE_TYPE_%(STRUCT_NAME)s__OUT__DEVICE_ID
};
struct %(struct_name)s *resource = data;
int r;
r = client_resource_init(node, &resource->base, "%(resource_type)s", &funcs);
SOL_INT_CHECK(r, < 0, r);
%(field_init)s
return client_connect(&resource->base, node_opts->device_id);
}
''' % {
'struct_name': name,
'STRUCT_NAME': name.upper(),
'resource_type': resource_type,
'field_init': '\n'.join(field_init),
'to_repr_vec_fn': to_repr_vec_fn
}
def object_open_fn_server_c(state_struct_name, resource_type, name, props, definitions={'id':0}):
def_id = definitions['id']
definitions['id'] += 1
no_inputs = all(field_props['read_only'] for field_name, field_props in props.items())
if no_inputs:
from_repr_vec_fn_name = 'NULL'
inform_flow_fn_name = 'NULL'
else:
from_repr_vec_fn_name = '%s_from_repr_vec' % name
inform_flow_fn_name = '%s_inform_flow' % name
field_init = []
for field_name, field_props in props.items():
if 'enum' in field_props:
init = '(enum %s_%s)0' % (state_struct_name, field_name)
else:
init = JSON_TO_INIT[field_props.get('type', 'integer')]
field_init.append('''resource->state.%(field_name)s = %(init)s;''' % {
'field_name': field_name,
'init': init
})
return '''static int
%(struct_name)s_open(struct sol_flow_node *node, void *data, const struct sol_flow_node_options *options)
{
static const struct sol_str_slice rt_slice = SOL_STR_SLICE_LITERAL("%(resource_type)s");
static const struct server_resource_funcs funcs = {
.to_repr_vec = %(struct_name)s_to_repr_vec,
.from_repr_vec = %(from_repr_vec_fn_name)s,
.inform_flow = %(inform_flow_fn_name)s
};
struct %(struct_name)s *resource = data;
int r;
r = server_resource_init(&resource->base, node, rt_slice, &funcs);
if (!r) {
%(field_init)s
}
return r;
}
''' % {
'struct_name': name,
'resource_type': resource_type,
'def_id': def_id,
'from_repr_vec_fn_name': from_repr_vec_fn_name,
'inform_flow_fn_name': inform_flow_fn_name,
'field_init': '\n'.join(field_init)
}
def object_close_fn_client_c(name, props):
destroy_fields = []
for field_name, field_props in props.items():
if 'enum' in field_props:
continue
if field_props.get('type') == 'string':
destroy_fields.append('free(resource->state.%s);' % field_name)
return '''static void %(struct_name)s_close(struct sol_flow_node *node, void *data)
{
struct %(struct_name)s *resource = data;
%(destroy_fields)s
client_resource_close(&resource->base);
}
''' % {
'struct_name': name,
'destroy_fields': '\n'.join(destroy_fields)
}
def object_close_fn_server_c(name, props):
destroy_fields = []
for field_name, field_props in props.items():
if 'enum' in field_props:
continue
if field_props.get('type') == 'string':
destroy_fields.append('free(resource->state.%s);' % field_name)
return '''static void %(struct_name)s_close(struct sol_flow_node *node, void *data)
{
struct %(struct_name)s *resource = data;
%(destroy_fields)s
server_resource_close(&resource->base);
}
''' % {
'struct_name': name,
'destroy_fields': '\n'.join(destroy_fields)
}
def object_setters_fn_common_c(state_struct_name, name, props, client):
fields = []
for field, descr in props.items():
if client and descr['read_only']:
continue
if 'enum' in descr:
fields.append('''static int
%(struct_name)s_set_%(field_name)s(struct sol_flow_node *node, void *data, uint16_t port,
uint16_t conn_id, const struct sol_flow_packet *packet)
{
struct %(struct_name)s *resource = data;
const char *var;
if (!sol_flow_packet_get_string(packet, &var)) {
int16_t val = sol_str_table_lookup_fallback(%(state_struct_name)s_%(field_name)s_tbl,
sol_str_slice_from_str(var), -1);
if (val >= 0) {
resource->state.%(field_name)s = (enum %(state_struct_name)s_%(field_name)s)val;
%(type)s_resource_schedule_update(&resource->base);
return 0;
}
return -ENOENT;
}
return -EINVAL;
}
''' % {
'field_name': field,
'FIELD_NAME': field.upper(),
'state_struct_name': state_struct_name,
'STATE_STRUCT_NAME': state_struct_name.upper(),
'struct_name': name,
'type': 'client' if client else 'server'
})
elif descr['type'] == 'string':
fields.append('''static int
%(struct_name)s_set_%(field_name)s(struct sol_flow_node *node, void *data, uint16_t port,
uint16_t conn_id, const struct sol_flow_packet *packet)
{
struct %(struct_name)s *resource = data;
const char *var;
int r;
r = sol_flow_packet_get_string(packet, &var);
if (!r) {
r = sol_util_replace_str_if_changed(&resource->state.%(field_name)s, var);
SOL_INT_CHECK(r, < 0, r);
if (r > 0) {
%(type)s_resource_schedule_update(&resource->base);
r = 0;
}
}
return r;
}
''' % {
'struct_name': name,
'field_name': field,
'type': 'client' if client else 'server'
})
else:
fields.append('''static int
%(struct_name)s_set_%(field_name)s(struct sol_flow_node *node, void *data, uint16_t port,
uint16_t conn_id, const struct sol_flow_packet *packet)
{
struct %(struct_name)s *resource = data;
%(c_type_tmp)s var;
int r;
r = %(c_getter)s(packet, &var);
if (!r) {
if (%(c_check_updated)s(resource->state.%(field_name)s, (%(c_type)s) var)) {
resource->state.%(field_name)s = (%(c_type)s) var;
%(type)s_resource_schedule_update(&resource->base);
}
}
return r;
}
''' % {
'struct_name': name,
'field_name': field,
'c_type': JSON_TO_C[descr['type']],
'c_type_tmp': JSON_TO_C_TMP[descr['type']],
'c_getter': JSON_TO_FLOW_GET_PKT[descr['type']],
'c_check_updated': JSON_TO_FLOW_CHECK_UPDATED[descr['type']],
'type': 'client' if client else 'server'
})
return '\n'.join(fields)
def object_setters_fn_client_c(state_struct_name, name, props):
return object_setters_fn_common_c(state_struct_name, name, props, True)
def object_setters_fn_server_c(state_struct_name, name, props):
return object_setters_fn_common_c(state_struct_name, name, props, False)
def generate_enums_common_c(name, props):
output = []
for field, descr in props.items():
if 'enum' in descr:
if 'description' in descr:
output.append('''/* %s */''' % descr['description'])
output.append('''enum %(struct_name)s_%(field_name)s { %(items)s };''' % {
'struct_name': name,
'field_name': field,
'items': ', '.join(('%s_%s_%s' % (name, field, item)).upper() for item in descr['enum'])
})
output.append('''static const struct sol_str_table %(struct_name)s_%(field_name)s_tbl[] = {
%(items)s,
{ }
};''' % {
'struct_name': name,
'field_name': field,
'items': ',\n'.join('SOL_STR_TABLE_ITEM(\"%s\", %s_%s_%s)' % (
item, name.upper(), field.upper(), item.upper()) for item in descr['enum'])
})
return '\n'.join(output)
def generate_object_client_c(resource_type, state_struct_name, name, props):
return """struct %(struct_name)s {
struct client_resource base;
struct %(state_struct_name)s state;
};
%(to_repr_vec_fn)s
%(from_repr_vec_fn)s
%(inform_flow_fn)s
%(open_fn)s
%(close_fn)s
%(setters_fn)s
""" % {
'state_struct_name': state_struct_name,
'struct_name': name,
'to_repr_vec_fn': object_to_repr_vec_fn_client_c(state_struct_name, name, props),
'from_repr_vec_fn': object_from_repr_vec_fn_client_c(state_struct_name, name, props),
'inform_flow_fn': object_inform_flow_fn_client_c(state_struct_name, name, props),
'open_fn': object_open_fn_client_c(state_struct_name, resource_type, name, props),
'close_fn': object_close_fn_client_c(name, props),
'setters_fn': object_setters_fn_client_c(state_struct_name, name, props),
}
def generate_object_server_c(resource_type, state_struct_name, name, props):
return """struct %(struct_name)s {
struct server_resource base;
struct %(state_struct_name)s state;
};
%(to_repr_vec_fn)s
%(from_repr_vec_fn)s
%(inform_flow_fn)s
%(open_fn)s
%(close_fn)s
%(setters_fn)s
""" % {
'struct_name': name,
'state_struct_name': state_struct_name,
'to_repr_vec_fn': object_to_repr_vec_fn_server_c(state_struct_name, name, props),
'from_repr_vec_fn': object_from_repr_vec_fn_server_c(state_struct_name, name, props),
'inform_flow_fn': object_inform_flow_fn_server_c(state_struct_name, name, props),
'open_fn': object_open_fn_server_c(state_struct_name, resource_type, name, props),
'close_fn': object_close_fn_server_c(name, props),
'setters_fn': object_setters_fn_server_c(state_struct_name, name, props)
}
def generate_object_common_c(name, props):
return """%(enums)s
struct %(struct_name)s {
%(struct_fields)s
};
%(from_repr_vec_fn)s
""" % {
'enums': generate_enums_common_c(name, props),
'struct_name': name,
'struct_fields': object_fields_common_c(name, name, props),
'from_repr_vec_fn': object_from_repr_vec_fn_common_c(name, props),
}
def generate_object_json(resource_type, struct_name, node_name, title, props, server):
if server:
in_ports = []
else:
in_ports = [{
'data_type': 'any',
'description':
            'Scan all reachable resources that match the interface. '
'Packets with IDs are sent through output port DEVICE_ID.',
'methods': {
'process': 'scan'
},
'name': 'SCAN'
},
{
'data_type': 'string',
'description':
'Set current server device ID to connect to. Override device ID set in device_id option.',
'methods': {
'process': 'device_id_process'
},
'name': 'DEVICE_ID'
}]
for prop_name, prop_descr in props.items():
if not server and prop_descr['read_only']:
continue
in_ports.append({
'data_type': JSON_TO_SOL_JSON[prop_descr.get('type', 'string')],
'description': prop_descr.get('description', '???'),
'methods': {
'process': '%s_set_%s' % (struct_name, prop_name)
},
'name': '%s' % prop_name.upper()
})
if server:
out_ports = []
else:
out_ports = [{
'data_type': 'boolean',
'description': 'Outputs true if resource was found, false if not, or if unreachable',
'name': 'FOUND'
},
{
'data_type': 'string',
            'description': 'Send packets with IDs for all servers that respond to a scan request. Such IDs can be used to connect a client to a different server through input port DEVICE_ID',
'name': 'DEVICE_ID'
}]
for prop_name, prop_descr in props.items():
out_ports.append({
'data_type': JSON_TO_SOL_JSON[prop_descr.get('type', 'string')],
'description': prop_descr.get('description', '???'),
'name': '%s' % prop_name.upper()
})
output = {
'methods': {
'open': '%s_open' % struct_name,
'close': '%s_close' % struct_name
},
'private_data_type': struct_name,
'name': node_name,
'url': 'http://solettaproject.org/doc/latest/components/%s.html' % node_name.replace('/', '-')
}
if server:
output.update({
'category': 'iot/server',
'description': 'OIC Server (%s)' % title
})
else:
output.update({
'category': 'iot/client',
'description': 'OIC Client (%s)' % title,
'options': {
'version': 1,
'members': [
{
'data_type': 'string',
'description': 'Unique device ID (UUID, MAC address, etc)',
'name': 'device_id',
'default': ''
}
]
}
})
if in_ports:
output['in_ports'] = in_ports
if out_ports:
output['out_ports'] = out_ports
return output
def generate_object(rt, title, props, json_name):
def type_value(item):
return '%s %s' % (get_type_from_property(item[1]), item[0])
resource_type = rt
if rt.startswith('oic.r.'):
rt = rt[len('oic.r.'):]
elif rt.startswith('core.'):
rt = rt[len('core.'):]
c_identifier = rt.replace(".", "_").replace("-", "_").lower()
c_json_name = json_name.replace(".", "_").replace("-", "_").lower()
flow_identifier = rt.replace(".", "-").replace("_", "-").lower()
flow_json_name = json_name.replace(".", "-").replace("_", "-").lower()
client_node_name = "%s/client-%s" % (flow_json_name, flow_identifier)
client_struct_name = "%s_client_%s" % (c_json_name, c_identifier)
server_node_name = "%s/server-%s" % (flow_json_name, flow_identifier)
server_struct_name = "%s_server_%s" % (c_json_name, c_identifier)
state_struct_name = "%s_state_%s" % (c_json_name, c_identifier)
new_props = OrderedDict()
for k, v in sorted(props.items(), key=type_value):
new_props[k] = v
props = new_props
retval = {
'c_common': generate_object_common_c(state_struct_name, props),
'c_client': generate_object_client_c(resource_type, state_struct_name, client_struct_name, props),
'c_server': generate_object_server_c(resource_type, state_struct_name, server_struct_name, props),
'json_client': generate_object_json(resource_type, client_struct_name, client_node_name, title, props, False),
'json_server': generate_object_json(resource_type, server_struct_name, server_node_name, title, props, True)
}
return retval
def generate_for_schema(directory, path, json_name):
j = load_json_schema(directory, path)
for rt, defn in j.items():
if not (rt.startswith("oic.r.") or rt.startswith("core.")):
raise ValueError("not an OIC resource definition")
if defn.get('type') == 'object':
yield generate_object(rt, defn['title'], defn['properties'],
json_name)
def master_json_as_string(generated, json_name):
master_json = {
'$schema': 'http://solettaproject.github.io/soletta/schemas/node-type-genspec.schema',
'name': json_name,
'meta': {
'author': 'Intel Corporation',
'license': 'Apache-2.0',
'version': '1'
},
'types': [t['json_server'] for t in generated] + [t['json_client'] for t in generated]
}
return json.dumps(master_json, indent=4)
def master_c_as_string(generated, oic_gen_c, oic_gen_h):
generated = list(generated)
code = '''
#include <assert.h>
#include <errno.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "%(oic_gen_h)s"
#include "sol-coap.h"
#include "sol-mainloop.h"
#include "sol-oic-common.h"
#include "sol-oic-client.h"
#include "sol-oic-server.h"
#include "sol-str-slice.h"
#include "sol-str-table.h"
#include "sol-util.h"
#define DEFAULT_UDP_PORT 5683
#define MULTICAST_ADDRESS_IPv4 "224.0.1.187"
#define MULTICAST_ADDRESS_IPv6_LOCAL "ff02::fd"
#define MULTICAST_ADDRESS_IPv6_SITE "ff05::fd"
#define FIND_PERIOD_MS 5000
#define UPDATE_TIMEOUT_MS 50
#define DEVICE_ID_LEN (16)
#define streq(a, b) (strcmp((a), (b)) == 0)
#define likely(x) __builtin_expect(!!(x), 1)
struct client_resource;
struct server_resource;
struct client_resource_funcs {
bool (*to_repr_vec)(void *data, struct sol_oic_map_writer *repr_map);
int (*from_repr_vec)(struct client_resource *resource, const struct sol_oic_map_reader *repr_vec);
void (*inform_flow)(struct client_resource *resource);
int found_port;
int device_id_port;
};
struct server_resource_funcs {
bool (*to_repr_vec)(void *data, struct sol_oic_map_writer *repr_map);
int (*from_repr_vec)(struct server_resource *resource, const struct sol_oic_map_reader *repr);
void (*inform_flow)(struct server_resource *resource);
};
struct client_resource {
struct sol_flow_node *node;
const struct client_resource_funcs *funcs;
struct sol_oic_resource *resource;
struct sol_timeout *find_timeout;
struct sol_timeout *update_schedule_timeout;
struct sol_oic_client *client;
const char *rt;
char device_id[DEVICE_ID_LEN];
struct sol_ptr_vector scanned_ids;
};
struct server_resource {
struct sol_flow_node *node;
const struct server_resource_funcs *funcs;
struct sol_oic_server_resource *resource;
struct sol_timeout *update_schedule_timeout;
struct sol_oic_resource_type type;
};
static struct sol_network_link_addr multicast_ipv4, multicast_ipv6_local, multicast_ipv6_site;
static bool
initialize_multicast_addresses_once(void)
{
static bool multicast_addresses_initialized = false;
if (multicast_addresses_initialized)
return true;
multicast_ipv4 = (struct sol_network_link_addr) { .family = SOL_NETWORK_FAMILY_INET, .port = DEFAULT_UDP_PORT };
if (!sol_network_link_addr_from_str(&multicast_ipv4, MULTICAST_ADDRESS_IPv4)) {
SOL_WRN("Could not parse multicast IP address");
return false;
}
multicast_ipv6_local = (struct sol_network_link_addr) { .family = SOL_NETWORK_FAMILY_INET6, .port = DEFAULT_UDP_PORT };
if (!sol_network_link_addr_from_str(&multicast_ipv6_local, MULTICAST_ADDRESS_IPv6_LOCAL)) {
SOL_WRN("Could not parse multicast IP address");
return false;
}
multicast_ipv6_site = (struct sol_network_link_addr) { .family = SOL_NETWORK_FAMILY_INET6, .port = DEFAULT_UDP_PORT };
if (!sol_network_link_addr_from_str(&multicast_ipv6_site, MULTICAST_ADDRESS_IPv6_SITE)) {
SOL_WRN("Could not parse multicast IP address");
return false;
}
multicast_addresses_initialized = true;
return true;
}
static bool
client_resource_implements_type(struct sol_oic_resource *oic_res, const char *resource_type)
{
struct sol_str_slice rt = SOL_STR_SLICE_STR(resource_type, strlen(resource_type));
struct sol_str_slice *type;
uint16_t idx;
SOL_VECTOR_FOREACH_IDX(&oic_res->types, type, idx) {
if (sol_str_slice_eq(*type, rt))
return true;
}
return false;
}
static void
state_changed(sol_coap_responsecode_t response_code, struct sol_oic_client *oic_cli, const struct sol_network_link_addr *cliaddr,
const struct sol_oic_map_reader *repr_vec, void *data)
{
struct client_resource *resource = data;
if (!cliaddr || !repr_vec)
return;
if (!sol_network_link_addr_eq(cliaddr, &resource->resource->addr)) {
SOL_BUFFER_DECLARE_STATIC(resaddr, SOL_INET_ADDR_STRLEN);
SOL_BUFFER_DECLARE_STATIC(respaddr, SOL_INET_ADDR_STRLEN);
if (!sol_network_link_addr_to_str(&resource->resource->addr, &resaddr)) {
SOL_WRN("Could not convert network address to string");
return;
}
if (!sol_network_link_addr_to_str(cliaddr, &respaddr)) {
SOL_WRN("Could not convert network address to string");
return;
}
SOL_WRN("Expecting response from %%.*s, got from %%.*s, ignoring",
SOL_STR_SLICE_PRINT(sol_buffer_get_slice(&resaddr)),
SOL_STR_SLICE_PRINT(sol_buffer_get_slice(&respaddr)));
return;
}
if (resource->funcs->from_repr_vec(resource, repr_vec) > 0)
resource->funcs->inform_flow(resource);
}
static bool
found_resource(struct sol_oic_client *oic_cli, struct sol_oic_resource *oic_res, void *data)
{
struct client_resource *resource = data;
int r;
if (!oic_res) {
SOL_WRN("resource discovery timeout");
return false;
}
/* Some OIC device sent this node a discovery response packet but node's already set up. */
if (resource->resource) {
SOL_DBG("Received discovery packet when resource already set up, ignoring");
return false;
}
if (memcmp(oic_res->device_id.data, resource->device_id, 16) != 0) {
/* Not the droid we're looking for. */
SOL_DBG("Received resource with an unknown device_id, ignoring");
return true;
}
/* FIXME: Should this check move to sol-oic-client? Does it actually make sense? */
if (resource->rt && !client_resource_implements_type(oic_res, resource->rt)) {
SOL_DBG("Received resource that does not implement rt=%%s, ignoring", resource->rt);
return true;
}
if (resource->find_timeout) {
sol_timeout_del(resource->find_timeout);
resource->find_timeout = NULL;
}
SOL_INF("Found resource matching device_id");
resource->resource = sol_oic_resource_ref(oic_res);
if (!sol_oic_client_resource_set_observable(oic_cli, resource->resource,
state_changed, resource, true)) {
SOL_WRN("Could not observe resource as requested, will try again");
}
r = sol_flow_send_boolean_packet(resource->node,
resource->funcs->found_port, true);
if (r < 0)
SOL_WRN("Could not send flow packet, will try again");
return false;
}
static void
send_discovery_packets(struct client_resource *resource)
{
if (resource->resource)
return;
sol_flow_send_boolean_packet(resource->node, resource->funcs->found_port, false);
sol_oic_client_find_resource(resource->client, &multicast_ipv4, resource->rt,
found_resource, resource);
sol_oic_client_find_resource(resource->client, &multicast_ipv6_local, resource->rt,
found_resource, resource);
sol_oic_client_find_resource(resource->client, &multicast_ipv6_site, resource->rt,
found_resource, resource);
}
static bool
find_timer(void *data)
{
struct client_resource *resource = data;
if (resource->resource) {
SOL_INF("Timer expired when node already configured; disabling");
resource->find_timeout = NULL;
return false;
}
send_discovery_packets(resource);
return true;
}
static inline char
base16_encode_digit(const uint8_t nibble, const char a)
{
if (likely(nibble < 10))
return '0' + nibble;
return a + (nibble - 10);
}
static void
binary_to_hex_ascii(const char *binary, char *ascii)
{
const uint8_t *input = (const uint8_t *)binary;
size_t i, o = 0;
for (i = 0; i < DEVICE_ID_LEN; i++) {
const uint8_t b = input[i];
uint8_t n;
const uint8_t nibble[2] = {
(b & 0xf0) >> 4,
(b & 0x0f)
};
for (n = 0; n < 2; n++)
ascii[o++] = base16_encode_digit(nibble[n], 'a');
}
ascii[o] = 0;
}
static bool
scan_callback(struct sol_oic_client *oic_cli, struct sol_oic_resource *oic_res, void *data)
{
struct client_resource *resource = data;
char ascii[DEVICE_ID_LEN * 2 + 1];
char *id;
uint16_t i;
int r;
if (!oic_res) {
SOL_WRN("Scanning timeout");
return false;
}
/* FIXME: Should this check move to sol-oic-client? Does it actually make sense? */
if (resource->rt && !client_resource_implements_type(oic_res, resource->rt)) {
SOL_DBG("Received resource that does not implement rt=%%s, ignoring", resource->rt);
return true;
}
SOL_PTR_VECTOR_FOREACH_IDX(&resource->scanned_ids, id, i)
if (memcmp(id, oic_res->device_id.data, DEVICE_ID_LEN) == 0)
return true;
id = malloc(DEVICE_ID_LEN);
SOL_NULL_CHECK(id, true);
memcpy(id, oic_res->device_id.data, DEVICE_ID_LEN);
r = sol_ptr_vector_append(&resource->scanned_ids, id);
SOL_INT_CHECK_GOTO(r, < 0, error);
binary_to_hex_ascii(oic_res->device_id.data, ascii);
r = sol_flow_send_string_packet(resource->node,
resource->funcs->device_id_port, ascii);
if (r < 0)
SOL_WRN("Could not send server id.");
return true;
error:
SOL_WRN("Failed to process id.");
free(id);
return true;
}
static void
clear_scanned_ids(struct sol_ptr_vector *scanned_ids)
{
char *id;
uint16_t i;
SOL_PTR_VECTOR_FOREACH_IDX(scanned_ids, id, i)
free(id);
sol_ptr_vector_clear(scanned_ids);
}
static void
send_scan_packets(struct client_resource *resource)
{
clear_scanned_ids(&resource->scanned_ids);
sol_oic_client_find_resource(resource->client, &multicast_ipv4,
resource->rt, scan_callback, resource);
sol_oic_client_find_resource(resource->client, &multicast_ipv6_local,
resource->rt, scan_callback, resource);
sol_oic_client_find_resource(resource->client, &multicast_ipv6_site,
resource->rt, scan_callback, resource);
}
static bool
server_resource_perform_update(void *data)
{
struct server_resource *resource = data;
SOL_NULL_CHECK(resource->funcs->to_repr_vec, false);
if (!sol_oic_notify_observers(resource->resource,
resource->funcs->to_repr_vec, resource)) {
SOL_WRN("Error while serializing update message");
} else {
resource->funcs->inform_flow(resource);
}
resource->update_schedule_timeout = NULL;
return false;
}
static void
server_resource_schedule_update(struct server_resource *resource)
{
if (resource->update_schedule_timeout)
return;
resource->update_schedule_timeout = sol_timeout_add(UPDATE_TIMEOUT_MS,
server_resource_perform_update, resource);
}
static sol_coap_responsecode_t
server_handle_update(const struct sol_network_link_addr *cliaddr, const void *data,
const struct sol_oic_map_reader *repr_map, struct sol_oic_map_writer *output)
{
struct server_resource *resource = (struct server_resource *)data;
int r;
if (!resource->funcs->from_repr_vec)
return SOL_COAP_RSPCODE_NOT_IMPLEMENTED;
r = resource->funcs->from_repr_vec(resource, repr_map);
if (r > 0) {
server_resource_schedule_update(resource);
return SOL_COAP_RSPCODE_CHANGED;
} else if (r == 0)
return SOL_COAP_RSPCODE_OK;
else
return SOL_COAP_RSPCODE_PRECONDITION_FAILED;
}
static sol_coap_responsecode_t
server_handle_get(const struct sol_network_link_addr *cliaddr, const void *data,
const struct sol_oic_map_reader *repr_map, struct sol_oic_map_writer *output)
{
const struct server_resource *resource = data;
if (!resource->funcs->to_repr_vec)
return SOL_COAP_RSPCODE_NOT_IMPLEMENTED;
if (!resource->funcs->to_repr_vec((void *)resource, output))
return SOL_COAP_RSPCODE_INTERNAL_ERROR;
return SOL_COAP_RSPCODE_CONTENT;
}
// log_init() implementation happens within oic-gen.c
static void log_init(void);
static int
server_resource_init(struct server_resource *resource, struct sol_flow_node *node,
struct sol_str_slice resource_type, const struct server_resource_funcs *funcs)
{
log_init();
resource->node = node;
resource->update_schedule_timeout = NULL;
resource->funcs = funcs;
resource->type = (struct sol_oic_resource_type) {
SOL_SET_API_VERSION(.api_version = SOL_OIC_RESOURCE_TYPE_API_VERSION, )
.resource_type = resource_type,
.interface = SOL_STR_SLICE_LITERAL("oc.mi.def"),
.get = { .handle = server_handle_get },
.put = { .handle = server_handle_update },
.post = { .handle = server_handle_update },
};
resource->resource = sol_oic_server_add_resource(&resource->type,
resource, SOL_OIC_FLAG_DISCOVERABLE | SOL_OIC_FLAG_OBSERVABLE | SOL_OIC_FLAG_ACTIVE);
if (resource->resource)
return 0;
return -EINVAL;
}
static void
server_resource_close(struct server_resource *resource)
{
if (resource->update_schedule_timeout)
sol_timeout_del(resource->update_schedule_timeout);
sol_oic_server_del_resource(resource->resource);
}
static unsigned int
as_nibble(const char c)
{
if (c >= '0' && c <= '9')
return c - '0';
if (c >= 'a' && c <= 'f')
return c - 'a' + 10;
if (c >= 'A' && c <= 'F')
return c - 'A' + 10;
SOL_WRN("Invalid hex character: %%d", c);
return 0;
}
static void
hex_ascii_to_binary(const char *ascii, char *binary)
{
const char *p;
size_t i;
for (p = ascii, i = 0; i < DEVICE_ID_LEN; i++, p += 2)
binary[i] = as_nibble(*p) << 4 | as_nibble(*(p + 1));
}
static int
client_connect(struct client_resource *resource, const char *device_id)
{
if (!device_id || strlen(device_id) != 32) {
SOL_DBG("Invalid or empty device_id. Not trying to connect.");
return 0;
}
hex_ascii_to_binary(device_id, resource->device_id);
SOL_NULL_CHECK(resource->device_id, -ENOMEM);
if (resource->find_timeout)
sol_timeout_del(resource->find_timeout);
if (resource->resource) {
if (!sol_oic_client_resource_set_observable(resource->client,
resource->resource, NULL, NULL, false)) {
SOL_WRN("Could not unobserve resource");
}
sol_oic_resource_unref(resource->resource);
resource->resource = NULL;
}
SOL_INF("Sending multicast packets to find resource with device_id %%s (rt=%%s)",
device_id, resource->rt);
resource->find_timeout = sol_timeout_add(FIND_PERIOD_MS, find_timer, resource);
if (resource->find_timeout) {
/* Perform a find now instead of waiting FIND_PERIOD_MS the first time. If the
* resource is found in the mean time, the timeout will be automatically disabled. */
send_discovery_packets(resource);
return 0;
}
SOL_ERR("Could not create timeout to find resource");
return -ENOMEM;
}
static int
client_resource_init(struct sol_flow_node *node, struct client_resource *resource, const char *resource_type,
const struct client_resource_funcs *funcs)
{
log_init();
if (!initialize_multicast_addresses_once()) {
SOL_ERR("Could not initialize multicast addresses");
return -ENOTCONN;
}
assert(resource_type);
resource->client = sol_oic_client_new();
SOL_NULL_CHECK(resource->client, -ENOMEM);
sol_ptr_vector_init(&resource->scanned_ids);
resource->node = node;
resource->find_timeout = NULL;
resource->update_schedule_timeout = NULL;
resource->resource = NULL;
resource->funcs = funcs;
resource->rt = resource_type;
return 0;
}
static void
client_resource_close(struct client_resource *resource)
{
if (resource->find_timeout)
sol_timeout_del(resource->find_timeout);
if (resource->update_schedule_timeout)
sol_timeout_del(resource->update_schedule_timeout);
if (resource->resource) {
bool r = sol_oic_client_resource_set_observable(resource->client, resource->resource,
NULL, NULL, false);
if (!r)
SOL_WRN("Could not unobserve resource");
sol_oic_resource_unref(resource->resource);
}
clear_scanned_ids(&resource->scanned_ids);
sol_oic_client_del(resource->client);
}
static void
client_resource_update_ack(sol_coap_responsecode_t response_code, struct sol_oic_client *cli, const struct sol_network_link_addr *addr,
const struct sol_oic_map_reader *repr_vec, void *data)
{
struct client_resource *resource = data;
resource->funcs->inform_flow(resource);
}
static bool
client_resource_perform_update(void *data)
{
struct client_resource *resource = data;
int r;
SOL_NULL_CHECK_GOTO(resource->resource, disable_timeout);
SOL_NULL_CHECK_GOTO(resource->funcs->to_repr_vec, disable_timeout);
r = sol_oic_client_resource_request(resource->client, resource->resource,
SOL_COAP_METHOD_PUT, resource->funcs->to_repr_vec, resource,
client_resource_update_ack, data);
if (r < 0) {
SOL_WRN("Could not send update request to resource, will try again");
return true;
}
disable_timeout:
resource->update_schedule_timeout = NULL;
return false;
}
static void
client_resource_schedule_update(struct client_resource *resource)
{
if (resource->update_schedule_timeout)
return;
resource->update_schedule_timeout = sol_timeout_add(UPDATE_TIMEOUT_MS,
client_resource_perform_update, resource);
}
static int
scan(struct sol_flow_node *node, void *data, uint16_t port,
uint16_t conn_id, const struct sol_flow_packet *packet)
{
struct client_resource *resource = data;
send_scan_packets(resource);
return 0;
}
static int
device_id_process(struct sol_flow_node *node, void *data, uint16_t port,
uint16_t conn_id, const struct sol_flow_packet *packet)
{
struct client_resource *resource = data;
const char *device_id;
int r;
r = sol_flow_packet_get_string(packet, &device_id);
SOL_INT_CHECK(r, < 0, r);
return client_connect(resource, device_id);
}
static inline bool
check_updated_string(const char *a, const char *b)
{
if (a && b)
return strcmp(a, b) != 0;
else if ((a && !b) || (!a && b))
return true;
else
return false;
}
static inline bool
check_updated_int32(const int32_t a, const int32_t b)
{
return a != b;
}
static inline bool
check_updated_boolean(const bool a, const bool b)
{
return a != b;
}
static inline bool
check_updated_number(const double a, const double b)
{
return !sol_util_double_equal(a, b);
}
static inline int
send_string_packet(struct sol_flow_node *src, uint16_t src_port, const char *value)
{
return sol_flow_send_string_packet(src, src_port, value ? value : "");
}
#define RETURN_ERROR(errcode) do { ret = errcode; goto out; } while(0)
%(generated_c_common)s
%(generated_c_client)s
%(generated_c_server)s
#undef RETURN_ERROR
#include "%(oic_gen_c)s"
''' % {
'generated_c_common': '\n'.join(t['c_common'] for t in generated),
'generated_c_client': '\n'.join(t['c_client'] for t in generated),
'generated_c_server': '\n'.join(t['c_server'] for t in generated),
'oic_gen_c': oic_gen_c,
'oic_gen_h': oic_gen_h,
}
return code.replace('\n\n\n', '\n')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("schema_dirs",
help="Directories where JSON schemas are located. "
"Names must start with 'oic.r.' or 'core.' and use "
"extension '.json'",
nargs="+")
parser.add_argument("--node-type-json",
help="Path to store the master JSON with node type information",
required=True)
parser.add_argument("--node-type-impl",
help="Path to store the node type implementation",
required=True)
parser.add_argument("--node-type-gen-c",
help="Relative path to source generated with "
"sol-flow-node-type-gen for inclusion purposes.",
required=True)
parser.add_argument("--node-type-gen-h",
help="Relative path to header generated with "
"sol-flow-node-type-gen for inclusion purposes.",
required=True)
args = parser.parse_args()
def seems_schema(path):
# TODO properly handle update, batch and error files
if path.endswith('-Update.json') or path.endswith('-Error.json') or \
path.endswith('-Batch.json'):
return False
return path.endswith('.json') and (path.startswith('oic.r.') or path.startswith('core.'))
json_name = os.path.basename(args.node_type_json)
if json_name.endswith(".json"):
json_name = json_name[:-5]
generated = []
print('Generating code for schemas: ', end='')
for schema_dir in args.schema_dirs:
for path in (f for f in os.listdir(schema_dir) if seems_schema(f)):
print(path, end=', ')
try:
for code in generate_for_schema(schema_dir, path, \
json_name):
generated.append(code)
except KeyError as e:
if e.args[0] == 'array':
print("(arrays unsupported)", end=' ')
else:
raise e
except Exception as e:
print('Ignoring %s due to exception in generator. '
'Traceback follows:' % path, file=sys.stderr)
traceback.print_exc(file=sys.stderr)
continue
print('\nWriting master JSON: %s' % args.node_type_json)
open(args.node_type_json, 'w+', encoding='UTF-8').write(
master_json_as_string(generated, json_name))
print('Writing C: %s' % args.node_type_impl)
open(args.node_type_impl, 'w+', encoding='UTF-8').write(
master_c_as_string(generated, args.node_type_gen_c,
args.node_type_gen_h))
if os.path.exists('/usr/bin/indent'):
print('Indenting generated C.')
os.system("/usr/bin/indent -kr -l120 '%s'" % args.node_type_impl)
print('Done.')
|
|
# -*- coding: utf-8 -*-
"""
Production Configurations
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis for cache
- Use sentry for error logging
"""
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
import logging
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env('DJANGO_SECRET_KEY')
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# raven sentry client
# See https://docs.getsentry.com/hosted/clients/python/integrations/django/
INSTALLED_APPS += ('raven.contrib.django.raven_compat', )
# Use Whitenoise to serve static files
# See: https://whitenoise.readthedocs.io/
WHITENOISE_MIDDLEWARE = ('whitenoise.middleware.WhiteNoiseMiddleware', )
MIDDLEWARE = WHITENOISE_MIDDLEWARE + MIDDLEWARE
RAVEN_MIDDLEWARE = ('raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware', )
MIDDLEWARE = RAVEN_MIDDLEWARE + MIDDLEWARE
# SECURITY CONFIGURATION
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/1.9/ref/middleware/#module-django.middleware.security
# and https://docs.djangoproject.com/ja/1.9/howto/deployment/checklist/#run-manage-py-check-deploy
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
X_FRAME_OPTIONS = 'DENY'
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['example.com'])
# END SITE CONFIGURATION
INSTALLED_APPS += ('gunicorn', )
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.io/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='saasuweb <noreply@example.com>')
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[saasuweb] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# Anymail with Mailgun
INSTALLED_APPS += ("anymail", )
ANYMAIL = {
"MAILGUN_API_KEY": env('DJANGO_MAILGUN_API_KEY'),
"MAILGUN_SENDER_DOMAIN": env('MAILGUN_SENDER_DOMAIN')
}
EMAIL_BACKEND = "anymail.backends.mailgun.MailgunBackend"
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Use the Heroku-style specification
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db('DATABASE_URL')
# CACHING
# ------------------------------------------------------------------------------
REDIS_LOCATION = '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0)
# Heroku URL does not pass the DB number, so we parse it in
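# e.g. REDIS_URL='redis://127.0.0.1:6379' yields REDIS_LOCATION='redis://127.0.0.1:6379/0'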
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': REDIS_LOCATION,
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'IGNORE_EXCEPTIONS': True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
# Sentry Configuration
SENTRY_DSN = env('DJANGO_SENTRY_DSN')
SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry'],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'sentry'],
'propagate': False,
},
},
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
RAVEN_CONFIG = {
'CELERY_LOGLEVEL': env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO),
'DSN': SENTRY_DSN
}
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Your production stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
|
|
#
# Project:
# glideinWMS
#
# File Version:
#
# Description:
# This module implements classes to query the condor daemons
# and manipulate the results
# Please notice that it also converts \" into "
#
# Author:
# Igor Sfiligoi (Aug 30th 2006)
#
import condorExe
import condorSecurity
import os
import string
import copy
import socket
import xml.parsers.expat
#
# Configuration
#
# Set path to condor binaries
def set_path(new_condor_bin_path):
global condor_bin_path
condor_bin_path = new_condor_bin_path
#
# Caching classes
#
# dummy caching class, when you don't want caching
# used as base class below, too
class NoneScheddCache:
    # returns (cmd arg schedd string, LOCAL_DIR)
def getScheddId(self,schedd_name,pool_name):
return (self.iGetCmdScheddStr(schedd_name),{})
# INTERNAL and for inheritance
def iGetCmdScheddStr(self,schedd_name):
if schedd_name is None:
schedd_str=""
else:
schedd_str = "-name %s " % schedd_name
return schedd_str
# The schedd can be found either through -name attr
# or through the local disk lookup
# Remember which one to use
class LocalScheddCache(NoneScheddCache):
def __init__(self):
self.enabled=True
# dictionary of
        # (schedd_name,pool_name) => (cmd arg schedd string, env)
self.cache={}
self.my_ips=socket.gethostbyname_ex(socket.gethostname())[2]
try:
self.my_ips+=socket.gethostbyname_ex('localhost')[2]
except socket.gaierror,e:
pass # localhost not defined, ignore
def enable(self):
self.enabled=True
def disable(self):
self.enabled=False
#returns (cms arg schedd string,env)
def getScheddId(self,schedd_name,pool_name):
if schedd_name is None: # special case, do not cache
return ("",{})
if self.enabled:
k=(schedd_name,pool_name)
if not self.cache.has_key(k): # not in cache, discover it
env=self.iGetEnv(schedd_name, pool_name)
if env is None: #
self.cache[k]=(self.iGetCmdScheddStr(schedd_name),{})
else:
self.cache[k]=("",env)
return self.cache[k]
else: # not enabled, just return the str
return (self.iGetCmdScheddStr(schedd_name),{})
#
# PRIVATE
#
# return None if not found
# Can raise exceptions
def iGetEnv(self,schedd_name, pool_name):
cs=CondorStatus('schedd',pool_name)
data=cs.fetch(constraint='Name=?="%s"'%schedd_name,format_list=[('ScheddIpAddr','s'),('SPOOL_DIR_STRING','s'),('LOCAL_DIR_STRING','s')])
if not data.has_key(schedd_name):
raise RuntimeError, "Schedd '%s' not found"%schedd_name
el=data[schedd_name]
if 'SPOOL_DIR_STRING' not in el and 'LOCAL_DIR_STRING' not in el: # not advertising, cannot use disk optimization
return None
if not el.has_key('ScheddIpAddr'): # This should never happen
raise RuntimeError, "Schedd '%s' is not advertising ScheddIpAddr"%schedd_name
schedd_ip=el['ScheddIpAddr'][1:].split(':')[0]
if schedd_ip in self.my_ips: #seems local, go for the dir
l=el.get('SPOOL_DIR_STRING', el.get('LOCAL_DIR_STRING'))
if os.path.isdir(l): # making sure the directory exists
if 'SPOOL_DIR_STRING' in el:
return {'_CONDOR_SPOOL': '%s' %l }
else: # LOCAL_DIR_STRING
return {'_CONDOR_SPOOL': '%s/spool' %l }
else: #dir does not exist, likely not relevant, revert to standard behaviour
return None
else: # not local
return None
# default global object
local_schedd_cache=LocalScheddCache()
def condorq_attrs(q_constraint, attribute_list):
"""
    Retrieve a list of attribute values from all the factory queues.
"""
attr_str = ""
for attr in attribute_list:
attr_str += " -attr %s" % attr
xml_data = condorExe.exe_cmd("condor_q","-g -l %s -xml -constraint '%s'" % (attr_str, q_constraint))
classads_xml = []
tmp_list = []
for line in xml_data:
# look for the xml header
if line[:5] == "<?xml":
if len(tmp_list) > 0:
classads_xml.append(tmp_list)
tmp_list = []
tmp_list.append(line)
q_proxy_list = []
for ad_xml in classads_xml:
cred_list = xml2list(ad_xml)
q_proxy_list.extend(cred_list)
return q_proxy_list
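# Hypothetical usage sketch (constraint and attribute names are examples only):
#   proxies = condorq_attrs('JobStatus =?= 2', ['x509userproxy', 'Owner'])
#   # -> list of classad dictionaries, each keyed by the requested attributes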
#
# Condor monitoring classes
#
# Generic, you most probably don't want to use these
class AbstractQuery: # pure virtual, just to have a minimum set of methods defined
# returns the data, will not modify self
def fetch(self,constraint=None,format_list=None):
raise NotImplementedError,"Fetch not implemented"
# will fetch in self.stored_data
def load(self,constraint=None,format_list=None):
raise NotImplementedError,"Load not implemented"
# constraint_func is a boolean function, with only one argument (data el)
# same output as fetch, but limited to constraint_func(el)==True
#
# if constraint_func==None, return all the data
def fetchStored(self,constraint_func=None):
raise NotImplementedError,"fetchStored not implemented"
class StoredQuery(AbstractQuery): # still virtual, only fetchStored defined
stored_data = {}
def fetchStored(self,constraint_func=None):
return applyConstraint(self.stored_data,constraint_func)
#
# format_list is a list of
# (attr_name, attr_type)
# where attr_type is one of
# "s" - string
# "i" - integer
# "r" - real (float)
# "b" - bool
#
#
# security_obj, if defined, should be a child of condorSecurity.ProtoRequest
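#
# Illustrative example of a format_list asking for a string, an integer and a
# boolean attribute (attribute names taken as examples only):
#   format_list=[('Owner', 's'), ('ClusterId', 'i'), ('ExitBySignal', 'b')]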
class QueryExe(StoredQuery): # first fully implemented one, execute commands
def __init__(self,exe_name,resource_str,group_attribute,pool_name=None,security_obj=None,env={}):
self.exe_name=exe_name
self.env=env
self.resource_str=resource_str
self.group_attribute=group_attribute
self.pool_name=pool_name
if pool_name is None:
self.pool_str=""
else:
self.pool_str = "-pool %s" % pool_name
if security_obj is not None:
if security_obj.has_saved_state():
raise RuntimeError, "Cannot use a security object which has saved state."
self.security_obj=copy.deepcopy(security_obj)
else:
self.security_obj=condorSecurity.ProtoRequest()
    def require_integrity(self,requested_integrity): # if None, don't change; else force that one
if requested_integrity is None:
condor_val=None
elif requested_integrity:
condor_val="REQUIRED"
else:
# if not required, still should not fail if the other side requires it
condor_val='OPTIONAL'
self.security_obj.set('CLIENT','INTEGRITY',condor_val)
def get_requested_integrity(self):
condor_val = self.security_obj.get('CLIENT','INTEGRITY')
if condor_val is None:
return None
return (condor_val=='REQUIRED')
    def require_encryption(self,requested_encryption): # if None, don't change; else force that one
if requested_encryption is None:
condor_val=None
elif requested_encryption:
condor_val="REQUIRED"
else:
# if not required, still should not fail if the other side requires it
condor_val='OPTIONAL'
self.security_obj.set('CLIENT','ENCRYPTION',condor_val)
def get_requested_encryption(self):
condor_val = self.security_obj.get('CLIENT','ENCRYPTION')
if condor_val is None:
return None
return (condor_val=='REQUIRED')
def fetch(self,constraint=None,format_list=None):
if constraint is None:
constraint_str=""
else:
constraint_str="-constraint '%s'"%constraint
full_xml=(format_list is None)
if format_list is not None:
format_arr=[]
for format_el in format_list:
attr_name,attr_type=format_el
attr_format={'s':'%s','i':'%i','r':'%f','b':'%i'}[attr_type]
format_arr.append('-format "%s" "%s"'%(attr_format,attr_name))
format_str=string.join(format_arr," ")
# set environment for security settings
self.security_obj.save_state()
self.security_obj.enforce_requests()
if full_xml:
            xml_data = condorExe.exe_cmd(self.exe_name,"%s -xml %s %s"%(self.resource_str,self.pool_str,constraint_str),env=self.env)
        else:
            xml_data = condorExe.exe_cmd(self.exe_name,"%s %s -xml %s %s"%(self.resource_str,format_str,self.pool_str,constraint_str),env=self.env)
# restore old values
self.security_obj.restore_state()
list_data = xml2list(xml_data)
del xml_data
dict_data = list2dict(list_data, self.group_attribute)
return dict_data
def load(self, constraint=None, format_list=None):
self.stored_data = self.fetch(constraint, format_list)
#
# Fully usable query functions
#
# condor_q
class CondorQ(QueryExe):
def __init__(self,schedd_name=None,pool_name=None,security_obj=None,schedd_lookup_cache=local_schedd_cache):
self.schedd_name=schedd_name
if schedd_lookup_cache is None:
schedd_lookup_cache=NoneScheddCache()
schedd_str,env=schedd_lookup_cache.getScheddId(schedd_name, pool_name)
QueryExe.__init__(self,"condor_q",schedd_str,["ClusterId","ProcId"],pool_name,security_obj,env)
def fetch(self, constraint=None, format_list=None):
if format_list is not None:
# check that ClusterId and ProcId are present, and if not add them
format_list = complete_format_list(format_list, [("ClusterId", 'i'), ("ProcId", 'i')])
return QueryExe.fetch(self, constraint=constraint, format_list=format_list)
# condor_q, where we have only one ProcId per ClusterId
class CondorQLite(QueryExe):
def __init__(self,schedd_name=None,pool_name=None,security_obj=None,schedd_lookup_cache=local_schedd_cache):
self.schedd_name=schedd_name
if schedd_lookup_cache is None:
schedd_lookup_cache=NoneScheddCache()
schedd_str,env=schedd_lookup_cache.getScheddId(schedd_name, pool_name)
QueryExe.__init__(self,"condor_q",schedd_str,"ClusterId",pool_name,security_obj,env)
def fetch(self, constraint=None, format_list=None):
if format_list is not None:
# check that ClusterId is present, and if not add it
format_list = complete_format_list(format_list, [("ClusterId", 'i')])
return QueryExe.fetch(self, constraint=constraint, format_list=format_list)
# condor_status
class CondorStatus(QueryExe):
def __init__(self,subsystem_name=None,pool_name=None,security_obj=None):
if subsystem_name is None:
subsystem_str=""
else:
subsystem_str = "-%s" % subsystem_name
QueryExe.__init__(self,"condor_status",subsystem_str,"Name",pool_name,security_obj,{})
def fetch(self, constraint=None, format_list=None):
if format_list is not None:
# check that Name present and if not, add it
format_list = complete_format_list(format_list, [("Name",'s')])
return QueryExe.fetch(self, constraint=constraint, format_list=format_list)
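# Illustrative usage (assumes a reachable schedd/collector; host names are examples):
#   cq = CondorQ(schedd_name='schedd.example.org', pool_name='collector.example.org')
#   jobs = cq.fetch(constraint='JobStatus==2', format_list=[('Owner', 's')])
#   # jobs is a dictionary keyed by (ClusterId, ProcId), one entry per running job
#   cs = CondorStatus('schedd')
#   schedds = cs.fetch(format_list=[('ScheddIpAddr', 's')])  # keyed by Name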
#
# Subquery classes
#
# Generic, you most probably don't want to use this
class BaseSubQuery(StoredQuery):
def __init__(self, query, subquery_func):
self.query = query
self.subquery_func = subquery_func
def fetch(self, constraint=None):
indata = self.query.fetch(constraint)
        return self.subquery_func(indata)
#
    # NOTE: You need to call load() on the SubQuery object to use fetchStored(),
    #       and the parent query must have had load() issued beforehand
#
def load(self, constraint=None):
indata = self.query.fetchStored(constraint)
self.stored_data = self.subquery_func(indata)
#
# Fully usable subquery functions
#
class SubQuery(BaseSubQuery):
def __init__(self, query, constraint_func=None):
BaseSubQuery.__init__(self, query, lambda d:applyConstraint(d, constraint_func))
class Group(BaseSubQuery):
# group_key_func - Key extraction function
# One argument: classad dictionary
# Returns: value of the group key
    # group_data_func - Group summarization function
# One argument: list of classad dictionaries
# Returns: a summary classad dictionary
def __init__(self, query, group_key_func, group_data_func):
BaseSubQuery.__init__(self, query, lambda d:doGroup(d, group_key_func, group_data_func))
#
# Summarizing classes
#
class Summarize:
# hash_func - Hashing function
# One argument: classad dictionary
# Returns: hash value
# if None, will not be counted
# if a list, all elements will be used
def __init__(self, query, hash_func=lambda x:1):
self.query = query
self.hash_func = hash_func
# Parameters:
# constraint - string to be passed to query.fetch()
# hash_func - if !=None, use this instead of the main one
# Returns a dictionary of hash values
# Elements are counts (or more dictionaries if hash returns lists)
def count(self, constraint=None, hash_func=None):
data = self.query.fetch(constraint)
return fetch2count(data, self.getHash(hash_func))
# Use data pre-stored in query
# Same output as count
def countStored(self, constraint_func=None, hash_func=None):
data = self.query.fetchStored(constraint_func)
return fetch2count(data, self.getHash(hash_func))
# Parameters, same as count
# Returns a dictionary of hash values
# Elements are lists of keys (or more dictionaries if hash returns lists)
def list(self, constraint=None, hash_func=None):
data = self.query.fetch(constraint)
return fetch2list(data, self.getHash(hash_func))
# Use data pre-stored in query
# Same output as list
def listStored(self,constraint_func=None,hash_func=None):
data=self.query.fetchStored(constraint_func)
return fetch2list(data,self.getHash(hash_func))
### Internal
def getHash(self, hash_func):
if hash_func is None:
return self.hash_func
else:
return hash_func
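# Illustrative usage (hash function and attribute names are examples only):
#   s = Summarize(CondorQ('schedd.example.org'), hash_func=lambda el: el.get('Owner'))
#   counts = s.count(constraint='JobStatus==2')   # e.g. {'alice': 10, 'bob': 3}
#   lists = s.list()                              # e.g. {'alice': [(1, 0), (1, 1)], ...}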
class SummarizeMulti:
def __init__(self, queries, hash_func=lambda x:1):
self.counts = []
for query in queries:
            self.counts.append(Summarize(query, hash_func))
self.hash_func=hash_func
# see Count for description
def count(self, constraint=None, hash_func=None):
out = {}
for c in self.counts:
data = c.count(constraint, hash_func)
addDict(out, data)
return out
# see Count for description
def countStored(self, constraint_func=None, hash_func=None):
out = {}
for c in self.counts:
data = c.countStored(constraint_func, hash_func)
addDict(out, data)
return out
############################################################
#
# P R I V A T E, do not use
#
############################################################
# check that req_format_els are present in in_format_list, and if not add them
# return a new format_list
def complete_format_list(in_format_list, req_format_els):
out_format_list = in_format_list[0:]
for req_format_el in req_format_els:
found = False
for format_el in in_format_list:
if format_el[0] == req_format_el[0]:
found = True
break
if not found:
out_format_list.append(req_format_el)
return out_format_list
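# Illustrative example: complete_format_list([('Owner', 's')], [('ClusterId', 'i')])
# returns [('Owner', 's'), ('ClusterId', 'i')], while an input that already contains
# ClusterId is returned unchanged.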
#
# Convert Condor XML to list
#
# For Example:
#
#<?xml version="1.0"?>
#<!DOCTYPE classads SYSTEM "classads.dtd">
#<classads>
#<c>
# <a n="MyType"><s>Job</s></a>
# <a n="TargetType"><s>Machine</s></a>
# <a n="AutoClusterId"><i>0</i></a>
# <a n="ExitBySignal"><b v="f"/></a>
# <a n="TransferOutputRemaps"><un/></a>
# <a n="WhenToTransferOutput"><s>ON_EXIT</s></a>
#</c>
#<c>
# <a n="MyType"><s>Job</s></a>
# <a n="TargetType"><s>Machine</s></a>
# <a n="AutoClusterId"><i>0</i></a>
# <a n="OnExitRemove"><b v="t"/></a>
# <a n="x509userproxysubject"><s>/DC=gov/DC=fnal/O=Fermilab/OU=People/CN=Igor Sfiligoi/UID=sfiligoi</s></a>
#</c>
#</classads>
#
# 3 xml2list XML handler functions
def xml2list_start_element(name, attrs):
global xml2list_data, xml2list_inclassad, xml2list_inattr, xml2list_intype
if name == "c":
xml2list_inclassad = {}
elif name == "a":
xml2list_inattr = {"name": attrs["n"], "val": ""}
xml2list_intype = "s"
elif name == "i":
xml2list_intype = "i"
elif name == "r":
xml2list_intype = "r"
elif name == "b":
xml2list_intype = "b"
if attrs.has_key('v'):
xml2list_inattr["val"] = (attrs["v"] in ('T', 't', '1'))
else:
# extended syntax... value in text area
xml2list_inattr["val"] = None
elif name == "un":
xml2list_intype = "un"
xml2list_inattr["val"] = None
elif name in ("s", "e"):
pass # nothing to do
elif name == "classads":
pass # top element, nothing to do
else:
raise TypeError, "Unsupported type: %s" % name
def xml2list_end_element(name):
global xml2list_data, xml2list_inclassad, xml2list_inattr, xml2list_intype
if name == "c":
xml2list_data.append(xml2list_inclassad)
xml2list_inclassad = None
elif name == "a":
xml2list_inclassad[xml2list_inattr["name"]] = xml2list_inattr["val"]
xml2list_inattr = None
elif name in ("i", "b", "un", "r"):
xml2list_intype = "s"
elif name in ("s", "e"):
pass # nothing to do
elif name == "classads":
pass # top element, nothing to do
else:
raise TypeError, "Unexpected type: %s" % name
def xml2list_char_data(data):
global xml2list_data, xml2list_inclassad, xml2list_inattr, xml2list_intype
if xml2list_inattr is None:
# only process when in attribute
return
if xml2list_intype == "i":
xml2list_inattr["val"] = int(data)
elif xml2list_intype == "r":
xml2list_inattr["val"] = float(data)
elif xml2list_intype == "b":
if xml2list_inattr["val"] is not None:
#nothing to do, value was in attribute
pass
else:
xml2list_inattr["val"] = (data[0] in ('T', 't', '1'))
elif xml2list_intype == "un":
#nothing to do, value was in attribute
pass
else:
unescaped_data = string.replace(data, '\\"', '"')
xml2list_inattr["val"] += unescaped_data
def xml2list(xml_data):
global xml2list_data, xml2list_inclassad, xml2list_inattr, xml2list_intype
xml2list_data = []
xml2list_inclassad = None
xml2list_inattr = None
xml2list_intype = None
p = xml.parsers.expat.ParserCreate()
p.StartElementHandler = xml2list_start_element
p.EndElementHandler = xml2list_end_element
p.CharacterDataHandler = xml2list_char_data
found_xml = -1
for line in range(len(xml_data)):
# look for the xml header
if xml_data[line][:5] == "<?xml":
found_xml = line
break
if found_xml >= 0:
try:
p.Parse(string.join(xml_data[found_xml:]), 1)
except TypeError, e:
raise RuntimeError, "Failed to parse XML data, TypeError: %s" % e
except:
raise RuntimeError, "Failed to parse XML data, generic error"
# else no xml, so return an empty list
return xml2list_data
#
# Convert a list to a dictionary
#
def list2dict(list_data, attr_name):
if type(attr_name) in (type([]), type((1, 2))):
attr_list = attr_name
else:
attr_list = [attr_name]
dict_data = {}
for list_el in list_data:
if type(attr_name) in (type([]), type((1, 2))):
dict_name = []
list_keys=list_el.keys()
for an in attr_name:
if an in list_keys:
dict_name.append(list_el[an])
else:
# Try lower cases
for k in list_keys:
if an.lower()==k.lower():
dict_name.append(list_el[k])
break
dict_name=tuple(dict_name)
else:
dict_name = list_el[attr_name]
# dict_el will have all the elements but those in attr_list
dict_el = {}
for a in list_el:
if not (a in attr_list):
dict_el[a] = list_el[a]
dict_data[dict_name] = dict_el
return dict_data
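# Illustrative example: with attr_name=('ClusterId', 'ProcId'),
#   [{'ClusterId': 1, 'ProcId': 0, 'Owner': 'alice'}]
# becomes
#   {(1, 0): {'Owner': 'alice'}}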
def applyConstraint(data, constraint_func):
if constraint_func is None:
return data
else:
outdata = {}
for key in data.keys():
if constraint_func(data[key]):
outdata[key] = data[key]
return outdata
def doGroup(indata, group_key_func, group_data_func):
gdata = {}
for k in indata.keys():
inel = indata[k]
gkey = group_key_func(inel)
if gdata.has_key(gkey):
gdata[gkey].append(inel)
else:
gdata[gkey] = [inel]
outdata = {}
for k in gdata.keys():
outdata[k] = group_data_func(gdata[k])
return outdata
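# Illustrative example (the functions are examples only): grouping jobs by owner
# and summarizing each group by its size:
#   doGroup(jobs, lambda el: el['Owner'], lambda els: {'Count': len(els)})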
#
# Inputs
# data - data from a fetch()
# hash_func - Hashing function
# One argument: classad dictionary
# Returns: hash value
# if None, will not be counted
# if a list, all elements will be used
#
# Returns a dictionary of hash values
# Elements are counts (or more dictionaries if hash returns lists)
#
def fetch2count(data, hash_func):
count = {}
for k in data.keys():
el = data[k]
hid = hash_func(el)
if hid is None:
# hash tells us it does not want to count this
continue
# cel will point to the real counter
cel = count
# check if it is a list
if (type(hid) == type([])):
# have to create structure inside count
for h in hid[:-1]:
if not cel.has_key(h):
cel[h] = {}
cel = cel[h]
hid = hid[-1]
if cel.has_key(hid):
count_el = cel[hid] + 1
else:
count_el = 1
cel[hid] = count_el
return count
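# Illustrative example: with hash_func=lambda el: [el['Owner'], el['JobStatus']],
# the result is a nested dictionary such as {'alice': {2: 10, 5: 1}}, meaning
# 10 running and 1 held job for owner 'alice' (attribute names are examples only).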
#
# Inputs
# data - data from a fetch()
# hash_func - Hashing function
# One argument: classad dictionary
# Returns: hash value
# if None, will not be counted
# if a list, all elements will be used
#
# Returns a dictionary of hash values
# Elements are lists of keys (or more dictionaries if hash returns lists)
#
def fetch2list(data, hash_func):
return_list = {}
for k in data.keys():
el = data[k]
hid = hash_func(el)
if hid is None:
# hash tells us it does not want to list this
continue
# lel will point to the real list
lel = return_list
# check if it is a list
if (type(hid) == type([])):
# have to create structure inside list
for h in hid[:-1]:
if not lel.has_key(h):
lel[h] = {}
lel = lel[h]
hid = hid[-1]
if lel.has_key(hid):
            list_el = lel[hid] + [k]
else:
list_el = [k]
lel[hid] = list_el
return return_list
#
# Recursively add two dictionaries
# Do it in place, using the first one
#
def addDict(base_dict, new_dict):
for k in new_dict.keys():
new_el = new_dict[k]
if not base_dict.has_key(k):
            # nothing there, just copy
base_dict[k] = new_el
else:
if type(new_el) == type({}):
                # another dictionary, recurse
addDict(base_dict[k], new_el)
else:
base_dict[k] += new_el
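# Illustrative example:
#   base = {'alice': {2: 3}}
#   addDict(base, {'alice': {2: 1, 5: 2}, 'bob': {2: 4}})
#   # base is now {'alice': {2: 4, 5: 2}, 'bob': {2: 4}}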
|
|
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import http.cookiejar as cookielib
import json
import netrc
from optparse import SUPPRESS_HELP
import os
import re
import socket
import subprocess
import sys
import tempfile
import time
import urllib.error
import urllib.parse
import urllib.request
import xmlrpc.client
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
try:
import resource
def _rlimit_nofile():
return resource.getrlimit(resource.RLIMIT_NOFILE)
except ImportError:
def _rlimit_nofile():
return (256, 256)
try:
import multiprocessing
except ImportError:
multiprocessing = None
import event_log
from git_command import GIT, git_require
from git_config import GetUrlCookieFile
from git_refs import R_HEADS, HEAD
import git_superproject
import gitc_utils
from project import Project
from project import RemoteSpec
from command import Command, MirrorSafeCommand
from error import RepoChangedException, GitError, ManifestParseError
import platform_utils
from project import SyncBuffer
from progress import Progress
from wrapper import Wrapper
from manifest_xml import GitcManifest
_ONE_DAY_S = 24 * 60 * 60
class _FetchError(Exception):
"""Internal error thrown in _FetchHelper() when we don't want stack trace."""
pass
class _CheckoutError(Exception):
"""Internal error thrown in _CheckoutOne() when we don't want stack trace."""
class Sync(Command, MirrorSafeCommand):
jobs = 1
common = True
helpSummary = "Update working tree to the latest revision"
helpUsage = """
%prog [<project>...]
"""
helpDescription = """
The '%prog' command synchronizes local project directories
with the remote repositories specified in the manifest. If a local
project does not yet exist, it will clone a new local directory from
the remote repository and set up tracking branches as specified in
the manifest. If the local project already exists, '%prog'
will update the remote branches and rebase any new local changes
on top of the new remote changes.
'%prog' will synchronize all projects listed at the command
line. Projects can be specified either by name, or by a relative
or absolute path to the project's local directory. If no projects
are specified, '%prog' will synchronize all projects listed in
the manifest.
The -d/--detach option can be used to switch specified projects
back to the manifest revision. This option is especially helpful
if the project is currently on a topic branch, but the manifest
revision is temporarily needed.
The -s/--smart-sync option can be used to sync to a known good
build as specified by the manifest-server element in the current
manifest. The -t/--smart-tag option is similar and allows you to
specify a custom tag/label.
The -u/--manifest-server-username and -p/--manifest-server-password
options can be used to specify a username and password to authenticate
with the manifest server when using the -s or -t option.
If -u and -p are not specified when using the -s or -t option, '%prog'
will attempt to read authentication credentials for the manifest server
from the user's .netrc file.
'%prog' will not use authentication credentials from -u/-p or .netrc
if the manifest server specified in the manifest file already includes
credentials.
By default, all projects will be synced. The --fail-fast option can be used
to halt syncing as soon as possible when the first project fails to sync.
The --force-sync option can be used to overwrite existing git
directories if they have previously been linked to a different
object directory. WARNING: This may cause data to be lost since
refs may be removed when overwriting.
The --force-remove-dirty option can be used to remove previously used
projects with uncommitted changes. WARNING: This may cause data to be
lost since uncommitted changes may be removed with projects that no longer
exist in the manifest.
The --no-clone-bundle option disables any attempt to use
$URL/clone.bundle to bootstrap a new Git repository from a
resumable bundle file on a content delivery network. This
may be necessary if there are problems with the local Python
HTTP client or proxy configuration, but the Git binary works.
The --fetch-submodules option enables fetching Git submodules
of a project from server.
The -c/--current-branch option can be used to only fetch objects that
are on the branch specified by a project's revision.
The --optimized-fetch option can be used to only fetch projects that
are fixed to a sha1 revision if the sha1 revision does not already
exist locally.
The --prune option can be used to remove any refs that no longer
exist on the remote.
# SSH Connections
If at least one project remote URL uses an SSH connection (ssh://,
git+ssh://, or user@host:path syntax) repo will automatically
enable the SSH ControlMaster option when connecting to that host.
This feature permits other projects in the same '%prog' session to
reuse the same SSH tunnel, saving connection setup overheads.
To disable this behavior on UNIX platforms, set the GIT_SSH
environment variable to 'ssh'. For example:
export GIT_SSH=ssh
%prog
# Compatibility
This feature is automatically disabled on Windows, due to the lack
of UNIX domain socket support.
This feature is not compatible with url.insteadof rewrites in the
user's ~/.gitconfig. '%prog' is currently not able to perform the
rewrite early enough to establish the ControlMaster tunnel.
If the remote SSH daemon is Gerrit Code Review, version 2.0.10 or
later is required to fix a server side protocol bug.
"""
def _Options(self, p, show_smart=True):
try:
self.jobs = self.manifest.default.sync_j
except ManifestParseError:
self.jobs = 1
p.add_option('-f', '--force-broken',
dest='force_broken', action='store_true',
help='obsolete option (to be deleted in the future)')
p.add_option('--fail-fast',
dest='fail_fast', action='store_true',
help='stop syncing after first error is hit')
p.add_option('--force-sync',
dest='force_sync', action='store_true',
help="overwrite an existing git directory if it needs to "
"point to a different object directory. WARNING: this "
"may cause loss of data")
p.add_option('--force-remove-dirty',
dest='force_remove_dirty', action='store_true',
help="force remove projects with uncommitted modifications if "
"projects no longer exist in the manifest. "
"WARNING: this may cause loss of data")
p.add_option('-l', '--local-only',
dest='local_only', action='store_true',
help="only update working tree, don't fetch")
p.add_option('--no-manifest-update', '--nmu',
dest='mp_update', action='store_false', default='true',
help='use the existing manifest checkout as-is. '
'(do not update to the latest revision)')
p.add_option('-n', '--network-only',
dest='network_only', action='store_true',
help="fetch only, don't update working tree")
p.add_option('-d', '--detach',
dest='detach_head', action='store_true',
help='detach projects back to manifest revision')
p.add_option('-c', '--current-branch',
dest='current_branch_only', action='store_true',
help='fetch only current branch from server')
p.add_option('-v', '--verbose',
dest='output_mode', action='store_true',
help='show all sync output')
p.add_option('-q', '--quiet',
dest='output_mode', action='store_false',
help='only show errors')
p.add_option('-j', '--jobs',
dest='jobs', action='store', type='int',
help="projects to fetch simultaneously (default %d)" % self.jobs)
p.add_option('-m', '--manifest-name',
dest='manifest_name',
help='temporary manifest to use for this sync', metavar='NAME.xml')
p.add_option('--clone-bundle', action='store_true',
help='enable use of /clone.bundle on HTTP/HTTPS')
p.add_option('--no-clone-bundle', dest='clone_bundle', action='store_false',
help='disable use of /clone.bundle on HTTP/HTTPS')
p.add_option('-u', '--manifest-server-username', action='store',
dest='manifest_server_username',
help='username to authenticate with the manifest server')
p.add_option('-p', '--manifest-server-password', action='store',
dest='manifest_server_password',
help='password to authenticate with the manifest server')
p.add_option('--fetch-submodules',
dest='fetch_submodules', action='store_true',
help='fetch submodules from server')
p.add_option('--use-superproject', action='store_true',
help='use the manifest superproject to sync projects')
p.add_option('--no-tags',
dest='tags', default=True, action='store_false',
help="don't fetch tags")
p.add_option('--optimized-fetch',
dest='optimized_fetch', action='store_true',
help='only fetch projects fixed to sha1 if revision does not exist locally')
p.add_option('--retry-fetches',
default=0, action='store', type='int',
help='number of times to retry fetches on transient errors')
p.add_option('--prune', dest='prune', action='store_true',
help='delete refs that no longer exist on the remote')
if show_smart:
p.add_option('-s', '--smart-sync',
dest='smart_sync', action='store_true',
help='smart sync using manifest from the latest known good build')
p.add_option('-t', '--smart-tag',
dest='smart_tag', action='store',
help='smart sync using manifest from a known tag')
g = p.add_option_group('repo Version options')
g.add_option('--no-repo-verify',
dest='repo_verify', default=True, action='store_false',
help='do not verify repo source code')
g.add_option('--repo-upgraded',
dest='repo_upgraded', action='store_true',
help=SUPPRESS_HELP)
def _GetBranch(self):
"""Returns the branch name for getting the approved manifest."""
p = self.manifest.manifestProject
b = p.GetBranch(p.CurrentBranch)
branch = b.merge
if branch.startswith(R_HEADS):
branch = branch[len(R_HEADS):]
return branch
def _UpdateProjectsRevisionId(self, opt, args):
"""Update revisionId of every project with the SHA from superproject.
This function updates each project's revisionId with SHA from superproject.
It writes the updated manifest into a file and reloads the manifest from it.
Args:
opt: Program options returned from optparse. See _Options().
args: Arguments to pass to GetProjects. See the GetProjects
docstring for details.
Returns:
Returns path to the overriding manifest file.
"""
superproject = git_superproject.Superproject(self.manifest,
self.repodir)
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
manifest_path = superproject.UpdateProjectsRevisionId(all_projects)
if not manifest_path:
      print('error: Update of revisionId from superproject has failed',
file=sys.stderr)
sys.exit(1)
self._ReloadManifest(manifest_path)
return manifest_path
def _FetchProjectList(self, opt, projects, sem, *args, **kwargs):
"""Main function of the fetch threads.
Delegates most of the work to _FetchHelper.
Args:
opt: Program options returned from optparse. See _Options().
projects: Projects to fetch.
sem: We'll release() this semaphore when we exit so that another thread
can be started up.
*args, **kwargs: Remaining arguments to pass to _FetchHelper. See the
_FetchHelper docstring for details.
"""
try:
for project in projects:
success = self._FetchHelper(opt, project, *args, **kwargs)
if not success and opt.fail_fast:
break
finally:
sem.release()
def _FetchHelper(self, opt, project, lock, fetched, pm, err_event,
clone_filter):
"""Fetch git objects for a single project.
Args:
opt: Program options returned from optparse. See _Options().
project: Project object for the project to fetch.
lock: Lock for accessing objects that are shared amongst multiple
_FetchHelper() threads.
fetched: set object that we will add project.gitdir to when we're done
(with our lock held).
pm: Instance of a Project object. We will call pm.update() (with our
lock held).
err_event: We'll set this event in the case of an error (after printing
out info about the error).
clone_filter: Filter for use in a partial clone.
Returns:
Whether the fetch was successful.
"""
# We'll set to true once we've locked the lock.
did_lock = False
# Encapsulate everything in a try/except/finally so that:
# - We always set err_event in the case of an exception.
# - We always make sure we unlock the lock if we locked it.
start = time.time()
success = False
try:
try:
success = project.Sync_NetworkHalf(
quiet=opt.quiet,
verbose=opt.verbose,
current_branch_only=opt.current_branch_only,
force_sync=opt.force_sync,
clone_bundle=opt.clone_bundle,
tags=opt.tags, archive=self.manifest.IsArchive,
optimized_fetch=opt.optimized_fetch,
retry_fetches=opt.retry_fetches,
prune=opt.prune,
clone_filter=clone_filter)
self._fetch_times.Set(project, time.time() - start)
# Lock around all the rest of the code, since printing, updating a set
# and Progress.update() are not thread safe.
lock.acquire()
did_lock = True
if not success:
err_event.set()
print('error: Cannot fetch %s from %s'
% (project.name, project.remote.url),
file=sys.stderr)
if opt.fail_fast:
raise _FetchError()
fetched.add(project.gitdir)
pm.update(msg=project.name)
except _FetchError:
pass
except Exception as e:
print('error: Cannot fetch %s (%s: %s)'
% (project.name, type(e).__name__, str(e)), file=sys.stderr)
err_event.set()
raise
finally:
if did_lock:
lock.release()
finish = time.time()
self.event_log.AddSync(project, event_log.TASK_SYNC_NETWORK,
start, finish, success)
return success
def _Fetch(self, projects, opt, err_event):
fetched = set()
lock = _threading.Lock()
pm = Progress('Fetching projects', len(projects),
always_print_percentage=opt.quiet)
objdir_project_map = dict()
for project in projects:
objdir_project_map.setdefault(project.objdir, []).append(project)
threads = set()
sem = _threading.Semaphore(self.jobs)
for project_list in objdir_project_map.values():
# Check for any errors before running any more tasks.
# ...we'll let existing threads finish, though.
if err_event.isSet() and opt.fail_fast:
break
sem.acquire()
kwargs = dict(opt=opt,
projects=project_list,
sem=sem,
lock=lock,
fetched=fetched,
pm=pm,
err_event=err_event,
clone_filter=self.manifest.CloneFilter)
if self.jobs > 1:
t = _threading.Thread(target=self._FetchProjectList,
kwargs=kwargs)
# Ensure that Ctrl-C will not freeze the repo process.
t.daemon = True
threads.add(t)
t.start()
else:
self._FetchProjectList(**kwargs)
for t in threads:
t.join()
pm.end()
self._fetch_times.Save()
if not self.manifest.IsArchive:
self._GCProjects(projects, opt, err_event)
return fetched
def _CheckoutWorker(self, opt, sem, project, *args, **kwargs):
"""Main function of the fetch threads.
Delegates most of the work to _CheckoutOne.
Args:
opt: Program options returned from optparse. See _Options().
projects: Projects to fetch.
sem: We'll release() this semaphore when we exit so that another thread
can be started up.
*args, **kwargs: Remaining arguments to pass to _CheckoutOne. See the
_CheckoutOne docstring for details.
"""
try:
return self._CheckoutOne(opt, project, *args, **kwargs)
finally:
sem.release()
def _CheckoutOne(self, opt, project, lock, pm, err_event, err_results):
"""Checkout work tree for one project
Args:
opt: Program options returned from optparse. See _Options().
project: Project object for the project to checkout.
lock: Lock for accessing objects that are shared amongst multiple
_CheckoutWorker() threads.
pm: Instance of a Project object. We will call pm.update() (with our
lock held).
err_event: We'll set this event in the case of an error (after printing
out info about the error).
err_results: A list of strings, paths to git repos where checkout
failed.
Returns:
      Whether the checkout was successful.
"""
# We'll set to true once we've locked the lock.
did_lock = False
# Encapsulate everything in a try/except/finally so that:
# - We always set err_event in the case of an exception.
# - We always make sure we unlock the lock if we locked it.
start = time.time()
syncbuf = SyncBuffer(self.manifest.manifestProject.config,
detach_head=opt.detach_head)
success = False
try:
try:
project.Sync_LocalHalf(syncbuf, force_sync=opt.force_sync)
# Lock around all the rest of the code, since printing, updating a set
# and Progress.update() are not thread safe.
lock.acquire()
success = syncbuf.Finish()
did_lock = True
if not success:
err_event.set()
print('error: Cannot checkout %s' % (project.name),
file=sys.stderr)
raise _CheckoutError()
pm.update(msg=project.name)
except _CheckoutError:
pass
except Exception as e:
print('error: Cannot checkout %s: %s: %s' %
(project.name, type(e).__name__, str(e)),
file=sys.stderr)
err_event.set()
raise
finally:
if did_lock:
if not success:
err_results.append(project.relpath)
lock.release()
finish = time.time()
self.event_log.AddSync(project, event_log.TASK_SYNC_LOCAL,
start, finish, success)
return success
def _Checkout(self, all_projects, opt, err_event, err_results):
"""Checkout projects listed in all_projects
Args:
all_projects: List of all projects that should be checked out.
opt: Program options returned from optparse. See _Options().
err_event: We'll set this event in the case of an error (after printing
out info about the error).
err_results: A list of strings, paths to git repos where checkout
failed.
"""
# Perform checkouts in multiple threads when we are using partial clone.
    # Without partial clone, all needed git objects are already downloaded;
    # in this situation it's better to use only one process because the checkout
# would be mostly disk I/O; with partial clone, the objects are only
# downloaded when demanded (at checkout time), which is similar to the
# Sync_NetworkHalf case and parallelism would be helpful.
if self.manifest.CloneFilter:
syncjobs = self.jobs
else:
syncjobs = 1
lock = _threading.Lock()
pm = Progress('Checking out projects', len(all_projects))
threads = set()
sem = _threading.Semaphore(syncjobs)
for project in all_projects:
# Check for any errors before running any more tasks.
# ...we'll let existing threads finish, though.
if err_event.isSet() and opt.fail_fast:
break
sem.acquire()
if project.worktree:
kwargs = dict(opt=opt,
sem=sem,
project=project,
lock=lock,
pm=pm,
err_event=err_event,
err_results=err_results)
if syncjobs > 1:
t = _threading.Thread(target=self._CheckoutWorker,
kwargs=kwargs)
# Ensure that Ctrl-C will not freeze the repo process.
t.daemon = True
threads.add(t)
t.start()
else:
self._CheckoutWorker(**kwargs)
for t in threads:
t.join()
pm.end()
def _GCProjects(self, projects, opt, err_event):
gc_gitdirs = {}
for project in projects:
# Make sure pruning never kicks in with shared projects.
if (not project.use_git_worktrees and
len(project.manifest.GetProjectsWithName(project.name)) > 1):
if not opt.quiet:
print('%s: Shared project %s found, disabling pruning.' %
(project.relpath, project.name))
if git_require((2, 7, 0)):
project.EnableRepositoryExtension('preciousObjects')
else:
# This isn't perfect, but it's the best we can do with old git.
print('%s: WARNING: shared projects are unreliable when using old '
'versions of git; please upgrade to git-2.7.0+.'
% (project.relpath,),
file=sys.stderr)
project.config.SetString('gc.pruneExpire', 'never')
gc_gitdirs[project.gitdir] = project.bare_git
if multiprocessing:
cpu_count = multiprocessing.cpu_count()
else:
cpu_count = 1
jobs = min(self.jobs, cpu_count)
if jobs < 2:
for bare_git in gc_gitdirs.values():
bare_git.gc('--auto')
return
config = {'pack.threads': cpu_count // jobs if cpu_count > jobs else 1}
threads = set()
sem = _threading.Semaphore(jobs)
def GC(bare_git):
try:
try:
bare_git.gc('--auto', config=config)
except GitError:
err_event.set()
except Exception:
err_event.set()
raise
finally:
sem.release()
for bare_git in gc_gitdirs.values():
if err_event.isSet() and opt.fail_fast:
break
sem.acquire()
t = _threading.Thread(target=GC, args=(bare_git,))
t.daemon = True
threads.add(t)
t.start()
for t in threads:
t.join()
def _ReloadManifest(self, manifest_name=None):
if manifest_name:
# Override calls _Unload already
self.manifest.Override(manifest_name)
else:
self.manifest._Unload()
def UpdateProjectList(self, opt):
new_project_paths = []
for project in self.GetProjects(None, missing_ok=True):
if project.relpath:
new_project_paths.append(project.relpath)
file_name = 'project.list'
file_path = os.path.join(self.repodir, file_name)
old_project_paths = []
if os.path.exists(file_path):
with open(file_path, 'r') as fd:
old_project_paths = fd.read().split('\n')
# In reversed order, so subfolders are deleted before parent folder.
for path in sorted(old_project_paths, reverse=True):
if not path:
continue
if path not in new_project_paths:
# If the path has already been deleted, we don't need to do it
gitdir = os.path.join(self.manifest.topdir, path, '.git')
if os.path.exists(gitdir):
project = Project(
manifest=self.manifest,
name=path,
remote=RemoteSpec('origin'),
gitdir=gitdir,
objdir=gitdir,
use_git_worktrees=os.path.isfile(gitdir),
worktree=os.path.join(self.manifest.topdir, path),
relpath=path,
revisionExpr='HEAD',
revisionId=None,
groups=None)
if not project.DeleteWorktree(
quiet=opt.quiet,
force=opt.force_remove_dirty):
return 1
new_project_paths.sort()
with open(file_path, 'w') as fd:
fd.write('\n'.join(new_project_paths))
fd.write('\n')
return 0
def _SmartSyncSetup(self, opt, smart_sync_manifest_path):
if not self.manifest.manifest_server:
print('error: cannot smart sync: no manifest server defined in '
'manifest', file=sys.stderr)
sys.exit(1)
manifest_server = self.manifest.manifest_server
if not opt.quiet:
print('Using manifest server %s' % manifest_server)
if '@' not in manifest_server:
username = None
password = None
if opt.manifest_server_username and opt.manifest_server_password:
username = opt.manifest_server_username
password = opt.manifest_server_password
else:
try:
info = netrc.netrc()
except IOError:
# .netrc file does not exist or could not be opened
pass
else:
try:
parse_result = urllib.parse.urlparse(manifest_server)
if parse_result.hostname:
auth = info.authenticators(parse_result.hostname)
if auth:
username, _account, password = auth
else:
print('No credentials found for %s in .netrc'
% parse_result.hostname, file=sys.stderr)
except netrc.NetrcParseError as e:
print('Error parsing .netrc file: %s' % e, file=sys.stderr)
if (username and password):
manifest_server = manifest_server.replace('://', '://%s:%s@' %
(username, password),
1)
transport = PersistentTransport(manifest_server)
if manifest_server.startswith('persistent-'):
manifest_server = manifest_server[len('persistent-'):]
try:
server = xmlrpc.client.Server(manifest_server, transport=transport)
if opt.smart_sync:
branch = self._GetBranch()
if 'SYNC_TARGET' in os.environ:
target = os.environ['SYNC_TARGET']
[success, manifest_str] = server.GetApprovedManifest(branch, target)
elif ('TARGET_PRODUCT' in os.environ and
'TARGET_BUILD_VARIANT' in os.environ):
target = '%s-%s' % (os.environ['TARGET_PRODUCT'],
os.environ['TARGET_BUILD_VARIANT'])
[success, manifest_str] = server.GetApprovedManifest(branch, target)
else:
[success, manifest_str] = server.GetApprovedManifest(branch)
else:
assert(opt.smart_tag)
[success, manifest_str] = server.GetManifest(opt.smart_tag)
if success:
manifest_name = os.path.basename(smart_sync_manifest_path)
try:
with open(smart_sync_manifest_path, 'w') as f:
f.write(manifest_str)
except IOError as e:
print('error: cannot write manifest to %s:\n%s'
% (smart_sync_manifest_path, e),
file=sys.stderr)
sys.exit(1)
self._ReloadManifest(manifest_name)
else:
print('error: manifest server RPC call failed: %s' %
manifest_str, file=sys.stderr)
sys.exit(1)
except (socket.error, IOError, xmlrpc.client.Fault) as e:
print('error: cannot connect to manifest server %s:\n%s'
% (self.manifest.manifest_server, e), file=sys.stderr)
sys.exit(1)
except xmlrpc.client.ProtocolError as e:
print('error: cannot connect to manifest server %s:\n%d %s'
% (self.manifest.manifest_server, e.errcode, e.errmsg),
file=sys.stderr)
sys.exit(1)
return manifest_name
def _UpdateManifestProject(self, opt, mp, manifest_name):
"""Fetch & update the local manifest project."""
if not opt.local_only:
start = time.time()
success = mp.Sync_NetworkHalf(quiet=opt.quiet, verbose=opt.verbose,
current_branch_only=opt.current_branch_only,
force_sync=opt.force_sync,
tags=opt.tags,
optimized_fetch=opt.optimized_fetch,
retry_fetches=opt.retry_fetches,
submodules=self.manifest.HasSubmodules,
clone_filter=self.manifest.CloneFilter)
finish = time.time()
self.event_log.AddSync(mp, event_log.TASK_SYNC_NETWORK,
start, finish, success)
if mp.HasChanges:
syncbuf = SyncBuffer(mp.config)
start = time.time()
mp.Sync_LocalHalf(syncbuf, submodules=self.manifest.HasSubmodules)
clean = syncbuf.Finish()
self.event_log.AddSync(mp, event_log.TASK_SYNC_LOCAL,
start, time.time(), clean)
if not clean:
sys.exit(1)
self._ReloadManifest(opt.manifest_name)
if opt.jobs is None:
self.jobs = self.manifest.default.sync_j
def ValidateOptions(self, opt, args):
if opt.force_broken:
print('warning: -f/--force-broken is now the default behavior, and the '
'options are deprecated', file=sys.stderr)
if opt.network_only and opt.detach_head:
self.OptionParser.error('cannot combine -n and -d')
if opt.network_only and opt.local_only:
self.OptionParser.error('cannot combine -n and -l')
if opt.manifest_name and opt.smart_sync:
self.OptionParser.error('cannot combine -m and -s')
if opt.manifest_name and opt.smart_tag:
self.OptionParser.error('cannot combine -m and -t')
if opt.manifest_server_username or opt.manifest_server_password:
if not (opt.smart_sync or opt.smart_tag):
self.OptionParser.error('-u and -p may only be combined with -s or -t')
if None in [opt.manifest_server_username, opt.manifest_server_password]:
self.OptionParser.error('both -u and -p must be given')
def Execute(self, opt, args):
if opt.jobs:
self.jobs = opt.jobs
if self.jobs > 1:
soft_limit, _ = _rlimit_nofile()
self.jobs = min(self.jobs, (soft_limit - 5) // 3)
opt.quiet = opt.output_mode is False
opt.verbose = opt.output_mode is True
if opt.manifest_name:
self.manifest.Override(opt.manifest_name)
manifest_name = opt.manifest_name
smart_sync_manifest_path = os.path.join(
self.manifest.manifestProject.worktree, 'smart_sync_override.xml')
if opt.clone_bundle is None:
opt.clone_bundle = self.manifest.CloneBundle
if opt.smart_sync or opt.smart_tag:
manifest_name = self._SmartSyncSetup(opt, smart_sync_manifest_path)
else:
if os.path.isfile(smart_sync_manifest_path):
try:
platform_utils.remove(smart_sync_manifest_path)
except OSError as e:
print('error: failed to remove existing smart sync override manifest: %s' %
e, file=sys.stderr)
err_event = _threading.Event()
rp = self.manifest.repoProject
rp.PreSync()
cb = rp.CurrentBranch
if cb:
base = rp.GetBranch(cb).merge
if not base or not base.startswith('refs/heads/'):
print('warning: repo is not tracking a remote branch, so it will not '
'receive updates; run `repo init --repo-rev=stable` to fix.',
file=sys.stderr)
mp = self.manifest.manifestProject
mp.PreSync()
if opt.repo_upgraded:
_PostRepoUpgrade(self.manifest, quiet=opt.quiet)
if not opt.mp_update:
print('Skipping update of local manifest project.')
else:
self._UpdateManifestProject(opt, mp, manifest_name)
if opt.use_superproject:
manifest_name = self._UpdateProjectsRevisionId(opt, args)
if self.gitc_manifest:
gitc_manifest_projects = self.GetProjects(args,
missing_ok=True)
gitc_projects = []
opened_projects = []
for project in gitc_manifest_projects:
if project.relpath in self.gitc_manifest.paths and \
self.gitc_manifest.paths[project.relpath].old_revision:
opened_projects.append(project.relpath)
else:
gitc_projects.append(project.relpath)
if not args:
gitc_projects = None
if gitc_projects != [] and not opt.local_only:
print('Updating GITC client: %s' % self.gitc_manifest.gitc_client_name)
manifest = GitcManifest(self.repodir, self.gitc_manifest.gitc_client_name)
if manifest_name:
manifest.Override(manifest_name)
else:
manifest.Override(self.manifest.manifestFile)
gitc_utils.generate_gitc_manifest(self.gitc_manifest,
manifest,
gitc_projects)
print('GITC client successfully synced.')
# The opened projects need to be synced as normal, therefore we
# generate a new args list to represent the opened projects.
# TODO: make this more reliable -- if there's a project name/path overlap,
# this may choose the wrong project.
args = [os.path.relpath(self.manifest.paths[path].worktree, os.getcwd())
for path in opened_projects]
if not args:
return
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
err_network_sync = False
err_update_projects = False
err_checkout = False
self._fetch_times = _FetchTimes(self.manifest)
if not opt.local_only:
to_fetch = []
now = time.time()
if _ONE_DAY_S <= (now - rp.LastFetch):
to_fetch.append(rp)
to_fetch.extend(all_projects)
to_fetch.sort(key=self._fetch_times.Get, reverse=True)
fetched = self._Fetch(to_fetch, opt, err_event)
_PostRepoFetch(rp, opt.repo_verify)
if opt.network_only:
# bail out now; the rest touches the working tree
if err_event.isSet():
print('\nerror: Exited sync due to fetch errors.\n', file=sys.stderr)
sys.exit(1)
return
# Iteratively fetch missing and/or nested unregistered submodules
previously_missing_set = set()
while True:
self._ReloadManifest(manifest_name)
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
missing = []
for project in all_projects:
if project.gitdir not in fetched:
missing.append(project)
if not missing:
break
        # Guard against endlessly re-fetching repos that are genuinely missing:
        # if the set of missing repos has not changed since the last fetch, stop.
missing_set = set(p.name for p in missing)
if previously_missing_set == missing_set:
break
previously_missing_set = missing_set
fetched.update(self._Fetch(missing, opt, err_event))
# If we saw an error, exit with code 1 so that other scripts can check.
if err_event.isSet():
err_network_sync = True
if opt.fail_fast:
print('\nerror: Exited sync due to fetch errors.\n'
'Local checkouts *not* updated. Resolve network issues & '
'retry.\n'
'`repo sync -l` will update some local checkouts.',
file=sys.stderr)
sys.exit(1)
if self.manifest.IsMirror or self.manifest.IsArchive:
# bail out now, we have no working tree
return
if self.UpdateProjectList(opt):
err_event.set()
err_update_projects = True
if opt.fail_fast:
print('\nerror: Local checkouts *not* updated.', file=sys.stderr)
sys.exit(1)
err_results = []
self._Checkout(all_projects, opt, err_event, err_results)
if err_event.isSet():
err_checkout = True
# NB: We don't exit here because this is the last step.
# If there's a notice that's supposed to print at the end of the sync, print
# it now...
if self.manifest.notice:
print(self.manifest.notice)
# If we saw an error, exit with code 1 so that other scripts can check.
if err_event.isSet():
print('\nerror: Unable to fully sync the tree.', file=sys.stderr)
if err_network_sync:
print('error: Downloading network changes failed.', file=sys.stderr)
if err_update_projects:
print('error: Updating local project lists failed.', file=sys.stderr)
if err_checkout:
print('error: Checking out local projects failed.', file=sys.stderr)
if err_results:
print('Failing repos:\n%s' % '\n'.join(err_results), file=sys.stderr)
print('Try re-running with "-j1 --fail-fast" to exit at the first error.',
file=sys.stderr)
sys.exit(1)
if not opt.quiet:
print('repo sync has finished successfully.')
def _PostRepoUpgrade(manifest, quiet=False):
wrapper = Wrapper()
if wrapper.NeedSetupGnuPG():
wrapper.SetupGnuPG(quiet)
for project in manifest.projects:
if project.Exists:
project.PostRepoUpgrade()
def _PostRepoFetch(rp, repo_verify=True, verbose=False):
if rp.HasChanges:
print('info: A new version of repo is available', file=sys.stderr)
print(file=sys.stderr)
if not repo_verify or _VerifyTag(rp):
syncbuf = SyncBuffer(rp.config)
rp.Sync_LocalHalf(syncbuf)
if not syncbuf.Finish():
sys.exit(1)
print('info: Restarting repo with latest version', file=sys.stderr)
raise RepoChangedException(['--repo-upgraded'])
else:
print('warning: Skipped upgrade to unverified version', file=sys.stderr)
else:
if verbose:
print('repo version %s is current' % rp.work_git.describe(HEAD),
file=sys.stderr)
def _VerifyTag(project):
gpg_dir = os.path.expanduser('~/.repoconfig/gnupg')
if not os.path.exists(gpg_dir):
print('warning: GnuPG was not available during last "repo init"\n'
          'warning: Cannot automatically authenticate repo.',
file=sys.stderr)
return True
try:
cur = project.bare_git.describe(project.GetRevisionId())
except GitError:
cur = None
if not cur \
or re.compile(r'^.*-[0-9]{1,}-g[0-9a-f]{1,}$').match(cur):
rev = project.revisionExpr
if rev.startswith(R_HEADS):
rev = rev[len(R_HEADS):]
print(file=sys.stderr)
print("warning: project '%s' branch '%s' is not signed"
% (project.name, rev), file=sys.stderr)
return False
env = os.environ.copy()
env['GIT_DIR'] = project.gitdir
env['GNUPGHOME'] = gpg_dir
cmd = [GIT, 'tag', '-v', cur]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
out = proc.stdout.read()
proc.stdout.close()
err = proc.stderr.read()
proc.stderr.close()
if proc.wait() != 0:
print(file=sys.stderr)
print(out, file=sys.stderr)
print(err, file=sys.stderr)
print(file=sys.stderr)
return False
return True
class _FetchTimes(object):
_ALPHA = 0.5
def __init__(self, manifest):
self._path = os.path.join(manifest.repodir, '.repo_fetchtimes.json')
self._times = None
self._seen = set()
def Get(self, project):
self._Load()
return self._times.get(project.name, _ONE_DAY_S)
def Set(self, project, t):
self._Load()
name = project.name
old = self._times.get(name, t)
self._seen.add(name)
a = self._ALPHA
self._times[name] = (a * t) + ((1 - a) * old)
def _Load(self):
if self._times is None:
try:
with open(self._path) as f:
self._times = json.load(f)
except (IOError, ValueError):
try:
platform_utils.remove(self._path)
except OSError:
pass
self._times = {}
def Save(self):
if self._times is None:
return
to_delete = []
for name in self._times:
if name not in self._seen:
to_delete.append(name)
for name in to_delete:
del self._times[name]
try:
with open(self._path, 'w') as f:
json.dump(self._times, f, indent=2)
except (IOError, TypeError):
try:
platform_utils.remove(self._path)
except OSError:
pass
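# Illustrative behaviour of the exponential moving average in Set() above (values
# are examples only): with _ALPHA=0.5, a project whose stored time is 30.0s and
# whose latest fetch took 10.0s is recorded as 0.5*10.0 + 0.5*30.0 = 20.0s.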
# This is a replacement for xmlrpc.client.Transport using urllib.request
# and supporting persistent-http[s]. It cannot change hosts from
# request to request like the normal transport; the real URL
# is passed during initialization.
class PersistentTransport(xmlrpc.client.Transport):
def __init__(self, orig_host):
self.orig_host = orig_host
def request(self, host, handler, request_body, verbose=False):
with GetUrlCookieFile(self.orig_host, not verbose) as (cookiefile, proxy):
# Python doesn't understand cookies with the #HttpOnly_ prefix
# Since we're only using them for HTTP, copy the file temporarily,
# stripping those prefixes away.
if cookiefile:
tmpcookiefile = tempfile.NamedTemporaryFile(mode='w')
tmpcookiefile.write("# HTTP Cookie File")
try:
with open(cookiefile) as f:
for line in f:
if line.startswith("#HttpOnly_"):
line = line[len("#HttpOnly_"):]
tmpcookiefile.write(line)
tmpcookiefile.flush()
cookiejar = cookielib.MozillaCookieJar(tmpcookiefile.name)
try:
cookiejar.load()
except cookielib.LoadError:
cookiejar = cookielib.CookieJar()
finally:
tmpcookiefile.close()
else:
cookiejar = cookielib.CookieJar()
proxyhandler = urllib.request.ProxyHandler
if proxy:
proxyhandler = urllib.request.ProxyHandler({
"http": proxy,
"https": proxy})
opener = urllib.request.build_opener(
urllib.request.HTTPCookieProcessor(cookiejar),
proxyhandler)
url = urllib.parse.urljoin(self.orig_host, handler)
parse_results = urllib.parse.urlparse(url)
scheme = parse_results.scheme
if scheme == 'persistent-http':
scheme = 'http'
if scheme == 'persistent-https':
# If we're proxying through persistent-https, use http. The
# proxy itself will do the https.
if proxy:
scheme = 'http'
else:
scheme = 'https'
# Parse out any authentication information using the base class
host, extra_headers, _ = self.get_host_info(parse_results.netloc)
url = urllib.parse.urlunparse((
scheme,
host,
parse_results.path,
parse_results.params,
parse_results.query,
parse_results.fragment))
request = urllib.request.Request(url, request_body)
if extra_headers is not None:
for (name, header) in extra_headers:
request.add_header(name, header)
request.add_header('Content-Type', 'text/xml')
try:
response = opener.open(request)
except urllib.error.HTTPError as e:
if e.code == 501:
# We may have been redirected through a login process
# but our POST turned into a GET. Retry.
response = opener.open(request)
else:
raise
p, u = xmlrpc.client.getparser()
while 1:
data = response.read(1024)
if not data:
break
p.feed(data)
p.close()
return u.close()
def close(self):
pass
|
|
from __future__ import absolute_import, division, print_function, unicode_literals
import atexit
import codecs
import copy
import fnmatch
import logging
import os
import shutil
import signal
import sys
import threading
import traceback
from contextlib import contextmanager
from datetime import datetime, timedelta
import pkg_resources
import sqlalchemy
import yaml
from sqlalchemy.exc import OperationalError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# These need to be declared before we start importing from other flexget modules, since they might import them
from flexget.utils.sqlalchemy_utils import ContextSession
Base = declarative_base()
Session = sessionmaker(class_=ContextSession)
from flexget import config_schema, db_schema, logger, plugin # noqa
from flexget.event import fire_event # noqa
from flexget.ipc import IPCClient, IPCServer # noqa
from flexget.options import CoreArgumentParser, get_parser, manager_parser, ParserError, unicode_argv # noqa
from flexget.task import Task # noqa
from flexget.task_queue import TaskQueue # noqa
from flexget.utils.tools import pid_exists, console # noqa
log = logging.getLogger('manager')
manager = None
DB_CLEANUP_INTERVAL = timedelta(days=7)
class Manager(object):
"""Manager class for FlexGet
Fires events:
* manager.initialize
The first time the manager is initialized, before config is loaded
* manager.before_config_load
Before the config file is loaded from disk
* manager.before_config_validate
When updating the config, before the validator is run on it
* manager.config_updated
After a configuration file has been loaded or changed (and validated) this event is fired
* manager.startup
      After manager has been initialized. This is when the application becomes ready to use; however, no database
      lock is present, so the database must not be modified on this event.
* manager.lock_acquired
The manager does not always require a lock on startup, if one is requested, this event will run when it has been
acquired successfully
* manager.upgrade
If any plugins have declared a newer schema version than exists in the database, this event will be fired to
allow plugins to upgrade their tables
* manager.shutdown_requested
When shutdown has been requested. Any plugins which might add to execution queue should stop when this is fired.
* manager.shutdown
When the manager is exiting
* manager.execute.completed
If execution in current process was completed
* manager.daemon.started
* manager.daemon.completed
* manager.db_cleanup
"""
unit_test = False
options = None
def __init__(self, args):
"""
:param args: CLI args
"""
global manager
assert not manager, 'Only one instance of Manager should be created at a time!'
if args is None:
# Decode all arguments to unicode before parsing
args = unicode_argv()[1:]
self.args = args
self.config_base = None
self.config_name = None
self.config_path = None
self.db_filename = None
self.engine = None
self.lockfile = None
self.database_uri = None
self.db_upgraded = False
self._has_lock = False
self.is_daemon = False
self.ipc_server = None
self.task_queue = None
self.persist = None
self.initialized = False
self.config = {}
if '--help' in args or '-h' in args:
# TODO: This is a bit hacky, but we can't call parse on real arguments when --help is used because it will
# cause a system exit before plugins are loaded and print incomplete help. This will get us a default
# options object and we'll parse the real args later, or send them to daemon. #2807
self.options, extra = CoreArgumentParser().parse_known_args(['execute'])
else:
try:
self.options, extra = CoreArgumentParser().parse_known_args(args)
except ParserError:
# If a non-built-in command was used, we need to parse with a parser that doesn't define the subparsers
self.options, extra = manager_parser.parse_known_args(args)
try:
self.find_config(create=False)
except:
logger.start(level=self.options.loglevel.upper(), to_file=False)
raise
else:
log_file = os.path.expanduser(self.options.logfile)
# If an absolute path is not specified, use the config directory.
if not os.path.isabs(log_file):
log_file = os.path.join(self.config_base, log_file)
logger.start(log_file, self.options.loglevel.upper(), to_console=not self.options.cron)
manager = self
log.debug('sys.defaultencoding: %s' % sys.getdefaultencoding())
log.debug('sys.getfilesystemencoding: %s' % sys.getfilesystemencoding())
log.debug('os.path.supports_unicode_filenames: %s' % os.path.supports_unicode_filenames)
if codecs.lookup(sys.getfilesystemencoding()).name == 'ascii' and not os.path.supports_unicode_filenames:
log.warning('Your locale declares ascii as the filesystem encoding. Any plugins reading filenames from '
'disk will not work properly for filenames containing non-ascii characters. Make sure your '
'locale env variables are set up correctly for the environment which is launching FlexGet.')
def __del__(self):
global manager
manager = None
def initialize(self):
"""
Load plugins, database, and config. Also initializes (but does not start) the task queue and ipc server.
This should only be called after obtaining a lock.
"""
if self.initialized:
raise RuntimeError('Cannot call initialize on an already initialized manager.')
plugin.load_plugins(extra_dirs=[os.path.join(self.config_base, 'plugins')])
# Reparse CLI options now that plugins are loaded
self.options = get_parser().parse_args(self.args)
self.task_queue = TaskQueue()
self.ipc_server = IPCServer(self, self.options.ipc_port)
self.setup_yaml()
self.init_sqlalchemy()
fire_event('manager.initialize', self)
try:
self.load_config()
except ValueError as e:
log.critical('Failed to load config file: %s' % e.args[0])
raise
# cannot be imported at module level because of circular references
from flexget.utils.simple_persistence import SimplePersistence
self.persist = SimplePersistence('manager')
if db_schema.upgrade_required():
log.info('Database upgrade is required. Attempting now.')
fire_event('manager.upgrade', self)
if manager.db_upgraded:
fire_event('manager.db_upgraded', self)
fire_event('manager.startup', self)
self.initialized = True
@property
def tasks(self):
"""A list of tasks in the config"""
if not self.config:
return []
return self.config.get('tasks', {}).keys()
@property
def has_lock(self):
return self._has_lock
def execute(self, options=None, output=None, loglevel=None, priority=1):
"""
Run all tasks from the config (the set of tasks can be limited with options).
:param options: Either an :class:`argparse.Namespace` instance, or a dict, containing options for execution
:param output: If a file-like object is specified here, log messages and stdout from the execution will be
written to it.
:param priority: If there are other executions waiting to be run, they will be run in priority order,
lowest first.
:returns: a list of :class:`threading.Event` instances which will be
set when each respective task has finished running
"""
if options is None:
options = copy.copy(self.options.execute)
elif isinstance(options, dict):
options_namespace = copy.copy(self.options.execute)
options_namespace.__dict__.update(options)
options = options_namespace
task_names = self.tasks
# Handle --tasks
if options.tasks:
# Consider * the same as not specifying tasks at all (makes sure manual plugin still works)
if options.tasks == ['*']:
options.tasks = None
else:
# Create list of tasks to run, preserving order
task_names = []
for arg in options.tasks:
matches = [t for t in self.tasks if fnmatch.fnmatchcase(unicode(t).lower(), arg.lower())]
if not matches:
msg = '`%s` does not match any tasks' % arg
log.error(msg)
if output:
output.write(msg)
continue
task_names.extend(m for m in matches if m not in task_names)
# Set the option as a list of matching task names so plugins can use it easily
options.tasks = task_names
# TODO: 1.2 This is a hack to make task priorities work still, not sure if it's the best one
task_names = sorted(task_names, key=lambda t: self.config['tasks'][t].get('priority', 65535))
finished_events = []
for task_name in task_names:
task = Task(self, task_name, options=options, output=output, loglevel=loglevel, priority=priority)
self.task_queue.put(task)
finished_events.append((task.id, task.name, task.finished_event))
return finished_events
def start(self):
"""
Starting point when executing from the command line; dispatches execution to the correct destination.
If there is a FlexGet process with an ipc server already running, the command will be sent there for execution
and results will be streamed back.
If not, this will attempt to obtain a lock, initialize the manager, and run the command here.
"""
# When we are in test mode, we use a different lock file and db
if self.options.test:
self.lockfile = os.path.join(self.config_base, '.test-%s-lock' % self.config_name)
# If another FlexGet process is already running, send the execution to it
ipc_info = self.check_ipc_info()
if ipc_info:
console('There is a FlexGet process already running for this config, sending execution there.')
log.debug('Sending command to running FlexGet process: %s' % self.args)
try:
client = IPCClient(ipc_info['port'], ipc_info['password'])
except ValueError as e:
log.error(e)
else:
try:
client.handle_cli(self.args)
except KeyboardInterrupt:
log.error('Disconnecting from daemon due to ctrl-c. Executions will still continue in the '
'background.')
except EOFError:
log.error('Connection from daemon was severed.')
return
if self.options.test:
log.info('Test mode, creating a copy of the database ...')
db_test_filename = os.path.join(self.config_base, 'test-%s.sqlite' % self.config_name)
if os.path.exists(self.db_filename):
shutil.copy(self.db_filename, db_test_filename)
log.info('Test database created')
self.db_filename = db_test_filename
# No running process, we start our own to handle command
with self.acquire_lock():
self.initialize()
self.handle_cli()
self._shutdown()
def handle_cli(self, options=None):
"""
Dispatch a cli command to the appropriate function.
* :meth:`.execute_command`
* :meth:`.daemon_command`
* CLI plugin callback function
The manager should have a lock and be initialized before calling this method.
:param options: argparse options for command. Defaults to options that manager was instantiated with.
"""
if not options:
options = self.options
command = options.cli_command
command_options = getattr(options, command)
# First check for built-in commands
if command in ['execute', 'daemon']:
if command == 'execute':
self.execute_command(command_options)
elif command == 'daemon':
self.daemon_command(command_options)
else:
# Otherwise dispatch the command to the callback function
options.cli_command_callback(self, command_options)
def execute_command(self, options):
"""
Handles the 'execute' CLI command.
If there is already a task queue running in this process, adds the execution to the queue.
If FlexGet is being invoked with this command, starts up a task queue and runs the execution.
Fires events:
* manager.execute.started
* manager.execute.completed
:param options: argparse options
"""
fire_event('manager.execute.started', self, options)
if self.task_queue.is_alive():
if len(self.task_queue):
log.verbose('There is a task already running, execution queued.')
finished_events = self.execute(options, output=logger.get_capture_stream(),
loglevel=logger.get_capture_loglevel())
if not options.cron:
# Wait until execution of all tasks has finished
for task_id, task_name, event in finished_events:
event.wait()
else:
self.task_queue.start()
self.ipc_server.start()
self.execute(options)
self.shutdown(finish_queue=True)
self.task_queue.wait()
fire_event('manager.execute.completed', self, options)
def daemon_command(self, options):
"""
Handles the 'daemon' CLI command.
Fires events:
* manager.daemon.started
* manager.daemon.completed
:param options: argparse options
"""
# Import API so it can register to daemon.started event
if options.action == 'start':
if self.is_daemon:
log.error('Daemon already running for this config.')
return
elif self.task_queue.is_alive():
log.error('Non-daemon execution of FlexGet is running. Cannot start daemon until it is finished.')
return
if options.daemonize:
self.daemonize()
try:
signal.signal(signal.SIGTERM, self._handle_sigterm)
except ValueError as e:
# If flexget is being called from another script, e.g. windows service helper, and we are not the
# main thread, this error will occur.
log.debug('Error registering sigterm handler: %s' % e)
self.is_daemon = True
fire_event('manager.daemon.started', self)
self.task_queue.start()
self.ipc_server.start()
self.task_queue.wait()
fire_event('manager.daemon.completed', self)
elif options.action in ['stop', 'reload', 'status']:
if not self.is_daemon:
log.error('There does not appear to be a daemon running.')
return
if options.action == 'status':
log.info('Daemon running. (PID: %s)' % os.getpid())
elif options.action == 'stop':
tasks = 'all queued tasks (if any) have' if options.wait else 'currently running task (if any) has'
log.info('Daemon shutdown requested. Shutdown will commence when %s finished executing.' % tasks)
self.shutdown(options.wait)
elif options.action == 'reload':
log.info('Reloading config from disk.')
try:
self.load_config()
except ValueError as e:
log.error('Error loading config: %s' % e.args[0])
else:
log.info('Config successfully reloaded from disk.')
def _handle_sigterm(self, signum, frame):
log.info('Got SIGTERM. Shutting down.')
self.shutdown(finish_queue=False)
def setup_yaml(self):
"""Sets up the yaml loader to return unicode objects for strings by default"""
def construct_yaml_str(self, node):
# Override the default string handling function
# to always return unicode objects
return self.construct_scalar(node)
yaml.Loader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
yaml.SafeLoader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
# Set up the dumper to not tag every string with !!python/unicode
def unicode_representer(dumper, uni):
node = yaml.ScalarNode(tag=u'tag:yaml.org,2002:str', value=uni)
return node
yaml.add_representer(unicode, unicode_representer)
# Set up the dumper to increase the indent for lists
def increase_indent_wrapper(func):
def increase_indent(self, flow=False, indentless=False):
func(self, flow, False)
return increase_indent
yaml.Dumper.increase_indent = increase_indent_wrapper(yaml.Dumper.increase_indent)
yaml.SafeDumper.increase_indent = increase_indent_wrapper(yaml.SafeDumper.increase_indent)
def find_config(self, create=False):
"""
Find the configuration file.
:param bool create: If a config file is not found, and create is True, one will be created in the home folder
:raises: `IOError` when no config file could be found, and `create` is False.
"""
config = None
home_path = os.path.join(os.path.expanduser('~'), '.flexget')
options_config = os.path.expanduser(self.options.config)
possible = []
if os.path.isabs(options_config):
# explicit path given, don't try anything
config = options_config
possible = [config]
else:
log.debug('Figuring out config load paths')
try:
possible.append(os.getcwdu())
except OSError:
log.debug('current directory invalid, not searching for config there')
# for virtualenv / dev sandbox
if hasattr(sys, 'real_prefix'):
log.debug('Adding virtualenv path')
possible.append(sys.prefix.decode(sys.getfilesystemencoding()))
# normal lookup locations
possible.append(home_path)
if sys.platform.startswith('win'):
# On windows look in ~/flexget as well, as explorer does not let you create a folder starting with a dot
home_path = os.path.join(os.path.expanduser('~'), 'flexget')
possible.append(home_path)
else:
# The freedesktop.org standard config location
xdg_config = os.environ.get('XDG_CONFIG_HOME', os.path.join(os.path.expanduser('~'), '.config'))
possible.append(os.path.join(xdg_config, 'flexget'))
for path in possible:
config = os.path.join(path, options_config)
if os.path.exists(config):
log.debug('Found config: %s' % config)
break
else:
config = None
if create and not (config and os.path.exists(config)):
config = os.path.join(home_path, options_config)
log.info('Config file %s not found. Creating new config %s' % (options_config, config))
with open(config, 'w') as newconfig:
# Write empty tasks to the config
newconfig.write(yaml.dump({'tasks': {}}))
elif not config:
log.critical('Failed to find configuration file %s' % options_config)
log.info('Tried to read from: %s' % ', '.join(possible))
raise IOError('No configuration file found.')
if not os.path.isfile(config):
raise IOError('Config `%s` does not appear to be a file.' % config)
log.debug('Config file %s selected' % config)
self.config_path = config
self.config_name = os.path.splitext(os.path.basename(config))[0]
self.config_base = os.path.normpath(os.path.dirname(config))
self.lockfile = os.path.join(self.config_base, '.%s-lock' % self.config_name)
self.db_filename = os.path.join(self.config_base, 'db-%s.sqlite' % self.config_name)
def load_config(self):
"""
Loads the config file from disk, validates and activates it.
:raises: `ValueError` if there is a problem loading the config file
"""
fire_event('manager.before_config_load', self)
with codecs.open(self.config_path, 'rb', 'utf-8') as f:
try:
raw_config = f.read()
except UnicodeDecodeError:
log.critical('Config file must be UTF-8 encoded.')
raise ValueError('Config file is not UTF-8 encoded')
try:
config = yaml.safe_load(raw_config) or {}
except Exception as e:
msg = str(e).replace('\n', ' ')
msg = ' '.join(msg.split())
log.critical(msg, exc_info=False)
print('')
print('-' * 79)
print(' Malformed configuration file (check messages above). Common reasons:')
print('-' * 79)
print('')
print(' o Indentation error')
print(' o Missing : from end of the line')
print(' o Non ASCII characters (use UTF8)')
print(' o If text contains any of :[]{}% characters it must be single-quoted '
'(eg. value{1} should be \'value{1}\')\n')
# Not very good practice, but we get several kinds of exceptions here and I'm not even sure of all of them.
# At least: ReaderError, YmlScannerError (or something like that)
if hasattr(e, 'problem') and hasattr(e, 'context_mark') and hasattr(e, 'problem_mark'):
lines = 0
if e.problem is not None:
print(' Reason: %s\n' % e.problem)
if e.problem == 'mapping values are not allowed here':
print(' ----> MOST LIKELY REASON: Missing : from end of the line!')
print('')
if e.context_mark is not None:
print(' Check configuration near line %s, column %s' % (e.context_mark.line, e.context_mark.column))
lines += 1
if e.problem_mark is not None:
print(' Check configuration near line %s, column %s' % (e.problem_mark.line, e.problem_mark.column))
lines += 1
if lines:
print('')
if lines == 1:
print(' Fault is almost always in this or previous line\n')
if lines == 2:
print(' Fault is almost always in one of these lines or previous ones\n')
# When --debug escalate to full stacktrace
if self.options.debug:
raise
raise ValueError('Config file is not valid YAML')
# config loaded successfully
log.debug('config_name: %s' % self.config_name)
log.debug('config_base: %s' % self.config_base)
# Install the newly loaded config
self.update_config(config)
def update_config(self, config):
"""
Provide a new config for the manager to use.
:raises: `ValueError` and rolls back to previous config if the provided config is not valid.
"""
new_user_config = config
old_config = self.config
try:
self.config = self.validate_config(config)
except ValueError as e:
for error in getattr(e, 'errors', []):
log.critical('[%s] %s', error.json_pointer, error.message)
log.debug('invalid config, rolling back')
self.config = old_config
raise
log.debug('New config data loaded.')
self.user_config = new_user_config
fire_event('manager.config_updated', self)
def save_config(self):
"""Dumps current config to yaml config file"""
# TODO: Only keep x number of backups..
# Back up the user's current config before overwriting
backup_path = os.path.join(self.config_base,
'%s-%s.bak' % (self.config_name, datetime.now().strftime('%y%m%d%H%M%S')))
log.debug('backing up old config to %s before new save' % backup_path)
shutil.copy(self.config_path, backup_path)
with open(self.config_path, 'w') as config_file:
config_file.write(yaml.dump(self.user_config, default_flow_style=False))
def config_changed(self):
"""Makes sure that all tasks will have the config_modified flag come out true on the next run.
Useful when changing the db and all tasks need to be completely reprocessed."""
from flexget.task import config_changed
for task in self.tasks:
config_changed(task)
fire_event('manager.config_updated', self)
def validate_config(self, config=None):
"""
Check all root level keywords are valid. Config may be modified by before_config_validate hooks. Modified
config will be returned.
:param config: Config to check. If not provided, current manager config will be checked.
:raises: `ValueError` when config fails validation. There will be an `errors` attribute with the schema errors.
:returns: Final validated config.
"""
if not config:
config = self.config
config = fire_event('manager.before_config_validate', config, self)
errors = config_schema.process_config(config)
if errors:
err = ValueError('Did not pass schema validation.')
err.errors = errors
raise err
else:
return config
def init_sqlalchemy(self):
"""Initialize SQLAlchemy"""
try:
if [int(part) for part in sqlalchemy.__version__.split('.')] < [0, 7, 0]:
print('FATAL: SQLAlchemy 0.7.0 or newer required. Please upgrade your SQLAlchemy.', file=sys.stderr)
sys.exit(1)
except ValueError as e:
log.critical('Failed to check SQLAlchemy version, you may need to upgrade it')
# SQLAlchemy
if self.database_uri is None:
# in case we are running on Windows, the path needs double backslashes
filename = self.db_filename.replace('\\', '\\\\')
self.database_uri = 'sqlite:///%s' % filename
if self.db_filename and not os.path.exists(self.db_filename):
log.verbose('Creating new database %s ...' % self.db_filename)
# fire up the engine
log.debug('Connecting to: %s' % self.database_uri)
try:
self.engine = sqlalchemy.create_engine(self.database_uri,
echo=self.options.debug_sql,
connect_args={'check_same_thread': False, 'timeout': 10})
except ImportError:
print('FATAL: Unable to use SQLite. Are you running Python 2.5 - 2.7?\n'
'Python should normally have SQLite support built in.\n'
'If you\'re running the correct version of Python then it is not equipped with SQLite.\n'
'You can try installing `pysqlite`. If you have compiled Python yourself, '
'recompile it with SQLite support.', file=sys.stderr)
sys.exit(1)
Session.configure(bind=self.engine)
# create all tables, doesn't do anything to existing tables
try:
Base.metadata.create_all(bind=self.engine)
except OperationalError as e:
if os.path.exists(self.db_filename):
print('%s - make sure you have write permissions to file %s' %
(e.message, self.db_filename), file=sys.stderr)
else:
print('%s - make sure you have write permissions to directory %s' %
(e.message, self.config_base), file=sys.stderr)
raise
def _read_lock(self):
"""
Read the values from the lock file. Returns None if there is no current lock file.
"""
if self.lockfile and os.path.exists(self.lockfile):
result = {}
with open(self.lockfile) as f:
lines = [l for l in f.readlines() if l]
for line in lines:
try:
key, value = line.split(b':', 1)
except ValueError:
log.debug('Invalid line in lock file: %s' % line)
continue
result[key.strip().lower()] = value.strip()
for key in result:
if result[key].isdigit():
result[key] = int(result[key])
result.setdefault('pid', None)
if not result['pid']:
log.error('Invalid lock file. Make sure FlexGet is not running, then delete it.')
elif not pid_exists(result['pid']):
return None
return result
return None
def check_lock(self):
"""Returns True if there is a lock on the database."""
lock_info = self._read_lock()
if not lock_info:
return False
# Don't count it if we hold the lock
if os.getpid() == lock_info['pid']:
return False
return True
def check_ipc_info(self):
"""If a daemon has a lock on the database, return info to connect to IPC."""
lock_info = self._read_lock()
if lock_info and 'port' in lock_info:
return lock_info
return None
@contextmanager
def acquire_lock(self, event=True):
"""
:param bool event: If True, the 'manager.lock_acquired' event will be fired after a lock is obtained
"""
acquired = False
try:
# Don't do anything if we already have a lock. This means only the outermost call will release the lock file
if not self._has_lock:
# Exit if there is an existing lock.
if self.check_lock():
with open(self.lockfile) as f:
pid = f.read()
print('Another process (%s) is running, will exit.' % pid.split('\n')[0], file=sys.stderr)
print('If you\'re sure there is no other instance running, delete %s' % self.lockfile,
file=sys.stderr)
sys.exit(1)
self._has_lock = True
self.write_lock()
acquired = True
if event:
fire_event('manager.lock_acquired', self)
yield
finally:
if acquired:
self.release_lock()
self._has_lock = False
def write_lock(self, ipc_info=None):
assert self._has_lock
with open(self.lockfile, 'w') as f:
f.write(b'PID: %s\n' % os.getpid())
if ipc_info:
for key in sorted(ipc_info):
f.write(b'%s: %s\n' % (key, ipc_info[key]))
def release_lock(self):
if os.path.exists(self.lockfile):
os.remove(self.lockfile)
log.debug('Removed %s' % self.lockfile)
else:
log.debug('Lockfile %s not found' % self.lockfile)
def daemonize(self):
"""Daemonizes the current process. Returns the new pid"""
if sys.platform.startswith('win'):
log.error('Cannot daemonize on windows')
return
if threading.activeCount() != 1:
log.critical('There are %r active threads. '
'Daemonizing now may cause strange failures.' % threading.enumerate())
log.info('Daemonizing...')
try:
pid = os.fork()
if pid > 0:
# Don't run the exit handlers on the parent
atexit._exithandlers = []
# exit first parent
sys.exit(0)
except OSError as e:
sys.stderr.write('fork #1 failed: %d (%s)\n' % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir('/')
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# Don't run the exit handlers on the parent
atexit._exithandlers = []
# exit from second parent
sys.exit(0)
except OSError as e:
sys.stderr.write('fork #2 failed: %d (%s)\n' % (e.errno, e.strerror))
sys.exit(1)
log.info('Daemonize complete. New PID: %s' % os.getpid())
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file('/dev/null', 'r')
so = file('/dev/null', 'a+')
se = file('/dev/null', 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# If we have a lock, update the lock file with our new pid
if self._has_lock:
self.write_lock()
def db_cleanup(self, force=False):
"""
Perform database cleanup if cleanup interval has been met.
Fires events:
* manager.db_cleanup
If interval was met. Gives session to do the cleanup as a parameter.
:param bool force: Run the cleanup no matter whether the interval has been met.
"""
expired = self.persist.get('last_cleanup', datetime(1900, 1, 1)) < datetime.now() - DB_CLEANUP_INTERVAL
if force or expired:
log.info('Running database cleanup.')
session = Session()
try:
fire_event('manager.db_cleanup', self, session)
session.commit()
finally:
session.close()
# Just in case some plugin was overzealous in its cleaning, mark the config changed
self.config_changed()
self.persist['last_cleanup'] = datetime.now()
else:
log.debug('Not running db cleanup, last run %s' % self.persist.get('last_cleanup'))
def shutdown(self, finish_queue=True):
"""
Request manager shutdown.
:param bool finish_queue: Should scheduler finish the task queue
"""
if not self.initialized:
raise RuntimeError('Cannot shutdown manager that was never initialized.')
fire_event('manager.shutdown_requested', self)
self.task_queue.shutdown(finish_queue)
def _shutdown(self):
"""Runs when the manager is done processing everything."""
if self.ipc_server:
self.ipc_server.shutdown()
fire_event('manager.shutdown', self)
if not self.unit_test: # don't scroll "nosetests" summary results when logging is enabled
log.debug('Shutting down')
self.engine.dispose()
# remove temporary database used in test mode
if self.options.test:
if 'test' not in self.db_filename:
raise Exception('trying to delete non test database?')
if self._has_lock:
os.remove(self.db_filename)
log.info('Removed test database')
def crash_report(self):
"""
This should be called when handling an unexpected exception. Will create a new log file containing the last 50
debug messages as well as the crash traceback.
"""
if not self.unit_test:
filename = os.path.join(self.config_base, datetime.now().strftime('crash_report.%Y.%m.%d.%H%M%S%f.log'))
with codecs.open(filename, 'w', encoding='utf-8') as outfile:
outfile.writelines(logger.debug_buffer)
traceback.print_exc(file=outfile)
log.critical('An unexpected crash has occurred. Writing crash report to %s' % filename)
log.debug('Traceback:', exc_info=True)
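# --- Illustrative usage sketch (an assumption, not part of FlexGet itself) ---
# The Manager above is normally driven by FlexGet's own CLI entry point: construct it
# with argv-style arguments, then start() either forwards the command to an already
# running daemon over IPC or acquires the lock, initializes plugins/config/db and runs
# the command locally. The helper name below is hypothetical.
def _example_run_flexget(argv):
    """Hypothetical driver mirroring what the real CLI entry point does."""
    mgr = Manager(argv)   # parses args, locates the config file, starts logging
    mgr.start()           # dispatches to a running daemon, or executes here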
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .TTransport import TTransportBase, TTransportException, \
TServerTransportBase
import os
import sys
import errno
import select
import socket
import warnings
import time
try:
import fcntl
except ImportError:
# Windows doesn't have this module
fcntl = None
class ConnectionEpoll:
""" epoll is preferred over select due to its efficiency and ability to
handle more than 1024 simultaneous connections """
def __init__(self):
self.epoll = select.epoll()
# TODO should we set any other masks?
# http://docs.python.org/library/select.html#epoll-objects
self.READ_MASK = select.EPOLLIN | select.EPOLLPRI
self.WRITE_MASK = select.EPOLLOUT
self.ERR_MASK = select.EPOLLERR | select.EPOLLHUP
def read(self, fileno):
self.unregister(fileno)
self.epoll.register(
fileno, self.READ_MASK | self.ERR_MASK
)
def write(self, fileno):
self.unregister(fileno)
self.epoll.register(fileno, self.WRITE_MASK)
def unregister(self, fileno):
try:
self.epoll.unregister(fileno)
except:
pass
def process(self, timeout):
# poll() invokes a "long" syscall that will be interrupted by any signal
# that comes in, causing an EINTR error. If this happens, avoid dying
# horribly by trying again with the appropriately shortened timeout.
deadline = time.clock() + float(timeout or 0)
poll_timeout = float(timeout or -1)
while True:
if timeout is not None and timeout > 0:
poll_timeout = max(0, deadline - time.clock())
try:
msgs = self.epoll.poll(timeout=poll_timeout)
break
except IOError as e:
if e.errno == errno.EINTR:
continue
else:
raise
rset = []
wset = []
xset = []
for fd, mask in msgs:
if mask & self.READ_MASK:
rset.append(fd)
if mask & self.WRITE_MASK:
wset.append(fd)
if mask & self.ERR_MASK:
xset.append(fd)
return rset, wset, xset
class ConnectionSelect:
def __init__(self):
self.readable = set()
self.writable = set()
def read(self, fileno):
if fileno in self.writable:
self.writable.remove(fileno)
self.readable.add(fileno)
def write(self, fileno):
if fileno in self.readable:
self.readable.remove(fileno)
self.writable.add(fileno)
def unregister(self, fileno):
if fileno in self.readable:
self.readable.remove(fileno)
elif fileno in self.writable:
self.writable.remove(fileno)
def registered(self, fileno):
return fileno in self.readable or fileno in self.writable
def process(self, timeout):
# select() invokes a "long" syscall that will be interrupted by any
# signal that comes in, causing an EINTR error. If this happens,
# avoid dying horribly by trying again with the appropriately
# shortened timeout.
deadline = time.clock() + float(timeout or 0)
poll_timeout = timeout if timeout is None or timeout > 0 else None
while True:
if timeout is not None and timeout > 0:
poll_timeout = max(0, deadline - time.clock())
try:
return select.select(list(self.readable), list(self.writable),
list(self.readable), poll_timeout)
except IOError as e:
if e.errno == errno.EINTR:
continue
else:
raise
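# Illustrative sketch (an assumption, not part of the Thrift API): ConnectionEpoll and
# ConnectionSelect expose the same minimal interface -- read()/write() register interest
# in a file descriptor, unregister() drops it, and process(timeout) returns the
# (readable, writable, errored) fd lists -- so callers can pick whichever is available
# at runtime, exactly as TServerSocket._sock_accept() does further below.
def _example_wait_readable(filenos, timeout=1.0):
    """Hypothetical helper: wait up to `timeout` seconds for readable fds."""
    poller = ConnectionEpoll() if hasattr(select, "epoll") else ConnectionSelect()
    for fileno in filenos:
        poller.read(fileno)
    readable, _, _ = poller.process(timeout)
    return readable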
class TSocketBase(TTransportBase):
"""Base class for both connected and listening sockets"""
def __init__(self):
self.handles = {}
def _resolveAddr(self, family=None):
if family is None:
family = socket.AF_UNSPEC
if self._unix_socket is not None:
return [(socket.AF_UNIX, socket.SOCK_STREAM, None, None,
self._unix_socket)]
else:
ai_flags = 0
if self.host is None:
ai_flags |= socket.AI_PASSIVE
return socket.getaddrinfo(self.host, self.port, family,
socket.SOCK_STREAM, 0,
ai_flags)
def close(self):
klist = self.handles.keys() if sys.version_info[0] < 3 else \
list(self.handles.keys())
for key in klist:
self.handles[key].close()
del self.handles[key]
def getSocketName(self):
if not self.handles:
raise TTransportException(TTransportException.NOT_OPEN,
'Transport not open')
return next(iter(self.handles.values())).getsockname()
def fileno(self):
if not self.handles:
raise TTransportException(TTransportException.NOT_OPEN,
'Transport not open')
if sys.version_info[0] >= 3:
return list(self.handles.values())[0].fileno()
else:
return self.handles.values()[0].fileno()
def setCloseOnExec(self, closeOnExec):
self.close_on_exec = closeOnExec
for handle in self.handles.values():
self._setHandleCloseOnExec(handle)
def _setHandleCloseOnExec(self, handle):
# Windows doesn't have this module, don't set the handle in this case.
if fcntl is None:
return
flags = fcntl.fcntl(handle, fcntl.F_GETFD, 0)
if flags < 0:
raise IOError('Error in retrieving file options')
if self.close_on_exec:
fcntl.fcntl(handle, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
else:
fcntl.fcntl(handle, fcntl.F_SETFD, flags & ~fcntl.FD_CLOEXEC)
class TSocket(TSocketBase):
"""Connection Socket implementation of TTransport base."""
def __init__(self, host='localhost', port=9090, unix_socket=None):
"""Initialize a TSocket
@param host(str) The host to connect to.
@param port(int) The (TCP) port to connect to.
@param unix_socket(str) The filename of a unix socket to connect to.
(host and port will be ignored.)
"""
TSocketBase.__init__(self)
self.host = host
self.port = port
self.handle = None
self._unix_socket = unix_socket
self._timeout = None
self.close_on_exec = True
def __enter__(self):
if not self.isOpen():
self.open()
return self
def __exit__(self, type, value, traceback):
if self.isOpen():
self.close()
def setHandle(self, h):
self.handle = h
self.handles[h.fileno()] = h
def getHandle(self):
return self.handle
def close(self):
TSocketBase.close(self)
self.handle = None
def isOpen(self):
return self.handle is not None
def setTimeout(self, ms):
if ms is None:
self._timeout = None
else:
self._timeout = ms / 1000.0
if self.handle is not None:
self.handle.settimeout(self._timeout)
def getPeerName(self):
if not self.handle:
raise TTransportException(TTransportException.NOT_OPEN,
'Transport not open')
return self.handle.getpeername()
def open(self):
try:
res0 = self._resolveAddr()
for res in res0:
handle = socket.socket(res[0], res[1])
self.setHandle(handle)
handle.settimeout(self._timeout)
self.setCloseOnExec(self.close_on_exec)
try:
handle.connect(res[4])
except socket.error as e:
self.close()
if res is not res0[-1]:
continue
else:
raise e
break
except socket.error as e:
if self._unix_socket:
message = 'Could not connect to socket %s: %s' % \
(self._unix_socket, repr(e))
else:
message = 'Could not connect to %s:%d: %s' % \
(self.host, self.port, repr(e))
raise TTransportException(TTransportException.NOT_OPEN, message)
def read(self, sz):
try:
buff = self.handle.recv(sz)
if len(buff) == 0:
raise TTransportException(type=TTransportException.END_OF_FILE,
message='TSocket read 0 bytes')
except socket.error as e:
message = 'Socket read failed with error %s (%s)' % \
(e.errno, e.strerror)
raise TTransportException(type=TTransportException.END_OF_FILE,
message=message)
return buff
def write(self, buff):
if not self.handle:
raise TTransportException(TTransportException.NOT_OPEN,
'Transport not open')
sent = 0
have = len(buff)
while sent < have:
try:
plus = self.handle.send(buff)
except socket.error as e:
message = 'Socket send failed with error %s (%s)' % (e.errno,
e.strerror)
raise TTransportException(type=TTransportException.END_OF_FILE,
message=message)
assert plus > 0
sent += plus
buff = buff[plus:]
def flush(self):
pass
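# Illustrative client sketch (an assumption, not part of the Thrift API): TSocket
# supports the context-manager protocol defined above, so a one-shot exchange can
# open the connection, send raw bytes and read a reply without manual cleanup.
def _example_client_roundtrip(host, port, payload):
    """Hypothetical helper: send `payload` and return up to 4096 reply bytes."""
    with TSocket(host=host, port=port) as sock:  # __enter__ calls open()
        sock.write(payload)
        sock.flush()                             # no-op, kept for transport parity
        return sock.read(4096)                   # __exit__ closes the handle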
class TServerSocket(TSocketBase, TServerTransportBase):
"""Socket implementation of TServerTransport base."""
def __init__(self, port=9090, unix_socket=None, family=None, backlog=128):
TSocketBase.__init__(self)
self.host = None
self.port = port
self._unix_socket = unix_socket
self.family = family
self.tcp_backlog = backlog
self.close_on_exec = True
# Since we now rely on select() by default to do accepts across
# multiple socket fds, we can receive two connections concurrently.
# In order to maintain compatibility with the existing .accept() API,
# we need to keep track of the accept backlog.
self._queue = []
def __enter__(self):
if not self.isListening():
self.listen()
return self
def __exit__(self, type, value, traceback):
if self.isListening():
self.close()
def getSocketName(self):
warnings.warn('getSocketName() is deprecated for TServerSocket. '
'Please use getSocketNames() instead.')
return TSocketBase.getSocketName(self)
def getSocketNames(self):
return [handle.getsockname() for handle in self.handles.values()]
def fileno(self):
warnings.warn('fileno() is deprecated for TServerSocket. '
'Please use filenos() instead.')
return TSocketBase.fileno(self)
def filenos(self):
return [handle.fileno() for handle in self.handles.values()]
def _cleanup_unix_socket(self, addrinfo):
tmp = socket.socket(addrinfo[0], addrinfo[1])
try:
tmp.connect(addrinfo[4])
except socket.error as err:
eno, message = err.args
if eno == errno.ECONNREFUSED:
os.unlink(addrinfo[4])
def isListening(self):
return bool(self.handles)
def listen(self):
res0 = self._resolveAddr(self.family)
for res in res0:
if res[0] == socket.AF_INET6 and not socket.has_ipv6:
# This happens if your version of python was built without IPv6
# support. getaddrinfo() will return IPv6 addresses, but the
# contents of the address field are bogus.
# (For example, see http://bugs.python.org/issue8858)
#
# Ignore IPv6 addresses if python doesn't have IPv6 support.
continue
# We need to remove the old unix socket if the file exists and
# nobody is listening on it.
if self._unix_socket:
self._cleanup_unix_socket(res)
# Don't complain if we can't create a socket
# since this is handled below.
try:
handle = socket.socket(res[0], res[1])
except:
continue
handle.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._setHandleCloseOnExec(handle)
# Always set IPV6_V6ONLY for IPv6 sockets when not on Windows
if res[0] == socket.AF_INET6 and sys.platform != 'win32':
handle.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, True)
handle.settimeout(None)
handle.bind(res[4])
handle.listen(self.tcp_backlog)
self.handles[handle.fileno()] = handle
if not self.handles:
raise TTransportException("No valid interfaces to listen on!")
def _sock_accept(self):
if self._queue:
return self._queue.pop()
if hasattr(select, "epoll"):
poller = ConnectionEpoll()
else:
poller = ConnectionSelect()
for filenos in self.handles.keys():
poller.read(filenos)
r, _, x = poller.process(0)
for fd in r:
self._queue.append(self.handles[fd].accept())
if not self._queue:
raise TTransportException("Accept interrupt without client?")
return self._queue.pop()
def accept(self):
return self._makeTSocketFromAccepted(self._sock_accept())
def _makeTSocketFromAccepted(self, accepted):
client, addr = accepted
result = TSocket()
result.setHandle(client)
return result
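# Illustrative server sketch (an assumption, not part of the Thrift API): the
# listen()/accept() cycle above yields one connected TSocket per client, taken
# from the internal accept backlog filled by _sock_accept().
def _example_accept_loop(port, handle_client):
    """Hypothetical helper: accept clients forever and hand each to a callback."""
    server = TServerSocket(port=port)
    server.listen()
    try:
        while True:
            client = server.accept()   # a TSocket wrapping the accepted handle
            try:
                handle_client(client)
            finally:
                client.close()
    finally:
        server.close()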
"""Bayesian Gaussian Mixture Models and
Dirichlet Process Gaussian Mixture Models"""
from __future__ import print_function
# Author: Alexandre Passos (alexandre.tp@gmail.com)
# Bertrand Thirion <bertrand.thirion@inria.fr>
#
# Based on mixture.py by:
# Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
import numpy as np
from scipy.special import digamma as _digamma, gammaln as _gammaln
from scipy import linalg
from scipy.spatial.distance import cdist
from ..externals.six.moves import xrange
from ..utils import check_random_state, check_array, deprecated
from ..utils.extmath import logsumexp, pinvh, squared_norm
from ..utils.validation import check_is_fitted
from .. import cluster
from .gmm import _GMMBase
@deprecated("The function digamma is deprecated in 0.18 and "
"will be removed in 0.20. Use scipy.special.digamma instead.")
def digamma(x):
return _digamma(x + np.finfo(np.float32).eps)
@deprecated("The function gammaln is deprecated in 0.18 and "
"will be removed in 0.20. Use scipy.special.gammaln instead.")
def gammaln(x):
return _gammaln(x + np.finfo(np.float32).eps)
@deprecated("The function log_normalize is deprecated in 0.18 and "
"will be removed in 0.20.")
def log_normalize(v, axis=0):
"""Normalized probabilities from unnormalized log-probabilites"""
v = np.rollaxis(v, axis)
v = v.copy()
v -= v.max(axis=0)
out = logsumexp(v)
v = np.exp(v - out)
v += np.finfo(np.float32).eps
v /= np.sum(v, axis=0)
return np.swapaxes(v, 0, axis)
@deprecated("The function wishart_log_det is deprecated in 0.18 and "
"will be removed in 0.20.")
def wishart_log_det(a, b, detB, n_features):
"""Expected value of the log of the determinant of a Wishart
The expected value of the logarithm of the determinant of a
wishart-distributed random variable with the specified parameters."""
l = np.sum(digamma(0.5 * (a - np.arange(-1, n_features - 1))))
l += n_features * np.log(2)
return l + detB
@deprecated("The function wishart_logz is deprecated in 0.18 and "
"will be removed in 0.20.")
def wishart_logz(v, s, dets, n_features):
"The logarithm of the normalization constant for the wishart distribution"
z = 0.
z += 0.5 * v * n_features * np.log(2)
z += (0.25 * (n_features * (n_features - 1)) * np.log(np.pi))
z += 0.5 * v * np.log(dets)
z += np.sum(gammaln(0.5 * (v - np.arange(n_features) + 1)))
return z
def _bound_wishart(a, B, detB):
"""Returns a function of the dof, scale matrix and its determinant
used as an upper bound in variational approximation of the evidence"""
n_features = B.shape[0]
logprior = wishart_logz(a, B, detB, n_features)
logprior -= wishart_logz(n_features,
np.identity(n_features),
1, n_features)
logprior += 0.5 * (a - 1) * wishart_log_det(a, B, detB, n_features)
logprior += 0.5 * a * np.trace(B)
return logprior
##############################################################################
# Variational bound on the log likelihood of each class
##############################################################################
def _sym_quad_form(x, mu, A):
"""helper function to calculate symmetric quadratic form x.T * A * x"""
q = (cdist(x, mu[np.newaxis], "mahalanobis", VI=A) ** 2).reshape(-1)
return q
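# Small self-check sketch (an assumption, not part of scikit-learn): _sym_quad_form
# evaluates (x_i - mu)^T A (x_i - mu) for every row x_i via scipy's Mahalanobis
# distance, so it should agree with the explicit dot-product computation.
def _example_sym_quad_form_check():
    rng = np.random.RandomState(0)
    x = rng.randn(4, 3)
    mu = np.zeros(3)
    A = np.eye(3)
    explicit = np.array([np.dot(np.dot(xi - mu, A), xi - mu) for xi in x])
    return np.allclose(_sym_quad_form(x, mu, A), explicit)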
def _bound_state_log_lik(X, initial_bound, precs, means, covariance_type):
"""Update the bound with likelihood terms, for standard covariance types"""
n_components, n_features = means.shape
n_samples = X.shape[0]
bound = np.empty((n_samples, n_components))
bound[:] = initial_bound
if covariance_type in ['diag', 'spherical']:
for k in range(n_components):
d = X - means[k]
bound[:, k] -= 0.5 * np.sum(d * d * precs[k], axis=1)
elif covariance_type == 'tied':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs)
elif covariance_type == 'full':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs[k])
return bound
class _DPGMMBase(_GMMBase):
"""Variational Inference for the Infinite Gaussian Mixture Model.
DPGMM stands for Dirichlet Process Gaussian Mixture Model, and it
is an infinite mixture model with the Dirichlet Process as a prior
distribution on the number of clusters. In practice the
approximate inference algorithm uses a truncated distribution with
a fixed maximum number of components, but almost always the number
of components actually used depends on the data.
Stick-breaking Representation of a Gaussian mixture model
probability distribution. This class allows for easy and efficient
inference of an approximate posterior distribution over the
parameters of a Gaussian mixture model with a variable number of
components (smaller than the truncation parameter n_components).
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Read more in the :ref:`User Guide <dpgmm>`.
Parameters
----------
n_components: int, default 1
Number of mixture components.
covariance_type: string, default 'diag'
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
alpha: float, default 1
Real number representing the concentration parameter of
the dirichlet process. Intuitively, the Dirichlet Process
is as likely to start a new cluster for a point as it is
to add that point to a cluster with alpha elements. A
higher alpha means more clusters, as the expected number
of clusters is ``alpha*log(N)``.
tol : float, default 1e-3
Convergence threshold.
n_iter : int, default 10
Maximum number of iterations to perform before convergence.
params : string, default 'wmc'
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars.
init_params : string, default 'wmc'
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default 0
Controls output verbosity.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_components : int
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
(`n_components`, `n_features`) if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
VBGMM : Finite Gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0, min_covar=None,
n_iter=10, params='wmc', init_params='wmc'):
self.alpha = alpha
super(_DPGMMBase, self).__init__(n_components, covariance_type,
random_state=random_state,
tol=tol, min_covar=min_covar,
n_iter=n_iter, params=params,
init_params=init_params,
verbose=verbose)
def _get_precisions(self):
"""Return precisions as a full matrix."""
if self.covariance_type == 'full':
return self.precs_
elif self.covariance_type in ['diag', 'spherical']:
return [np.diag(cov) for cov in self.precs_]
elif self.covariance_type == 'tied':
return [self.precs_] * self.n_components
def _get_covars(self):
return [pinvh(c) for c in self._get_precisions()]
def _set_covars(self, covars):
raise NotImplementedError("""The variational algorithm does
not support setting the covariance parameters.""")
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities: array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'gamma_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
z = np.zeros((X.shape[0], self.n_components))
sd = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dgamma1 = digamma(self.gamma_.T[1]) - sd
dgamma2 = np.zeros(self.n_components)
dgamma2[0] = digamma(self.gamma_[0, 2]) - digamma(self.gamma_[0, 1] +
self.gamma_[0, 2])
for j in range(1, self.n_components):
dgamma2[j] = dgamma2[j - 1] + digamma(self.gamma_[j - 1, 2])
dgamma2[j] -= sd[j - 1]
dgamma = dgamma1 + dgamma2
# Free memory and developers' cognitive load:
del dgamma1, dgamma2, sd
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dgamma
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
"""Update the concentration parameters for each cluster"""
sz = np.sum(z, axis=0)
self.gamma_.T[1] = 1. + sz
self.gamma_.T[2].fill(0)
for i in range(self.n_components - 2, -1, -1):
self.gamma_[i, 2] = self.gamma_[i + 1, 2] + sz[i]
self.gamma_.T[2] += self.alpha
def _update_means(self, X, z):
"""Update the variational distributions for the means"""
n_features = X.shape[1]
for k in range(self.n_components):
if self.covariance_type in ['spherical', 'diag']:
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num *= self.precs_[k]
den = 1. + self.precs_[k] * np.sum(z.T[k])
self.means_[k] = num / den
elif self.covariance_type in ['tied', 'full']:
if self.covariance_type == 'tied':
cov = self.precs_
else:
cov = self.precs_[k]
den = np.identity(n_features) + cov * np.sum(z.T[k])
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num = np.dot(cov, num)
self.means_[k] = linalg.lstsq(den, num)[0]
def _update_precisions(self, X, z):
"""Update the variational distributions for the precisions"""
n_features = X.shape[1]
if self.covariance_type == 'spherical':
self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
for k in range(self.n_components):
# could be more memory efficient ?
sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
self.scale_[k] = 1.
self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
self.bound_prec_[k] = (
0.5 * n_features * (
digamma(self.dof_[k]) - np.log(self.scale_[k])))
self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T
elif self.covariance_type == 'diag':
for k in range(self.n_components):
self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
sq_diff = (X - self.means_[k]) ** 2 # see comment above
self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
z.T[k], (sq_diff + 1))
self.precs_[k] = self.dof_[k] / self.scale_[k]
self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
- np.log(self.scale_[k]))
self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])
elif self.covariance_type == 'tied':
self.dof_ = 2 + X.shape[0] + n_features
self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
for k in range(self.n_components):
diff = X - self.means_[k]
self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_ = pinvh(self.scale_)
self.precs_ = self.dof_ * self.scale_
self.det_scale_ = linalg.det(self.scale_)
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
sum_resp = np.sum(z.T[k])
self.dof_[k] = 2 + sum_resp + n_features
self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
diff = X - self.means_[k]
self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_[k] = pinvh(self.scale_[k])
self.precs_[k] = self.dof_[k] * self.scale_[k]
self.det_scale_[k] = linalg.det(self.scale_[k])
self.bound_prec_[k] = 0.5 * wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
self.scale_[k])
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose > 0:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_.T[1])
print("covariance_type:", self.covariance_type)
def _do_mstep(self, X, z, params):
"""Maximize the variational lower bound
Update each of the parameters to maximize the lower bound."""
self._monitor(X, z, "z")
self._update_concentration(z)
self._monitor(X, z, "gamma")
if 'm' in params:
self._update_means(X, z)
self._monitor(X, z, "mu")
if 'c' in params:
self._update_precisions(X, z)
self._monitor(X, z, "a and b", end=True)
def _initialize_gamma(self):
"Initializes the concentration parameters"
self.gamma_ = self.alpha * np.ones((self.n_components, 3))
def _bound_concentration(self):
"""The variational lower bound for the concentration parameter."""
logprior = gammaln(self.alpha) * self.n_components
logprior += np.sum((self.alpha - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior += np.sum(- gammaln(self.gamma_.T[1] + self.gamma_.T[2]))
logprior += np.sum(gammaln(self.gamma_.T[1]) +
gammaln(self.gamma_.T[2]))
logprior -= np.sum((self.gamma_.T[1] - 1) * (
digamma(self.gamma_.T[1]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior -= np.sum((self.gamma_.T[2] - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
return logprior
def _bound_means(self):
"The variational lower bound for the mean parameters"
logprior = 0.
logprior -= 0.5 * squared_norm(self.means_)
logprior -= 0.5 * self.means_.shape[1] * self.n_components
return logprior
def _bound_precisions(self):
"""Returns the bound term related to precisions"""
logprior = 0.
if self.covariance_type == 'spherical':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_
- self.precs_[:, 0])
elif self.covariance_type == 'diag':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_ - self.precs_)
elif self.covariance_type == 'tied':
logprior += _bound_wishart(self.dof_, self.scale_, self.det_scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
logprior += _bound_wishart(self.dof_[k],
self.scale_[k],
self.det_scale_[k])
return logprior
def _bound_proportions(self, z):
"""Returns the bound term related to proportions"""
dg12 = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dg1 = digamma(self.gamma_.T[1]) - dg12
dg2 = digamma(self.gamma_.T[2]) - dg12
cz = np.cumsum(z[:, ::-1], axis=-1)[:, -2::-1]
logprior = np.sum(cz * dg2[:-1]) + np.sum(z * dg1)
del cz # Save memory
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _logprior(self, z):
logprior = self._bound_concentration()
logprior += self._bound_means()
logprior += self._bound_precisions()
logprior += self._bound_proportions(z)
return logprior
def lower_bound(self, X, z):
"""returns a lower bound on model evidence based on X and membership"""
check_is_fitted(self, 'means_')
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
c = np.sum(z * _bound_state_log_lik(X, self._initial_bound +
self.bound_prec_, self.precs_,
self.means_, self.covariance_type))
return c + self._logprior(z)
def _set_weights(self):
for i in xrange(self.n_components):
self.weights_[i] = self.gamma_[i, 1] / (self.gamma_[i, 1]
+ self.gamma_[i, 2])
self.weights_ /= np.sum(self.weights_)
def _fit(self, X, y=None):
"""Estimate model parameters with the variational
algorithm.
For a full derivation and description of the algorithm see
doc/modules/dp-derivation.rst
or
http://scikit-learn.org/stable/modules/dp-derivation.html
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating
the object. Likewise, if you would like just to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
self.random_state_ = check_random_state(self.random_state)
# initialization step
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
z = np.ones((n_samples, self.n_components))
z /= self.n_components
self._initial_bound = - 0.5 * n_features * np.log(2 * np.pi)
self._initial_bound -= np.log(2 * np.pi * np.e)
if (self.init_params != '') or not hasattr(self, 'gamma_'):
self._initialize_gamma()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state_).fit(X).cluster_centers_[::-1]
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components, self.n_components)
if 'c' in self.init_params or not hasattr(self, 'precs_'):
if self.covariance_type == 'spherical':
self.dof_ = np.ones(self.n_components)
self.scale_ = np.ones(self.n_components)
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * n_features * (
digamma(self.dof_) - np.log(self.scale_))
elif self.covariance_type == 'diag':
self.dof_ = 1 + 0.5 * n_features
self.dof_ *= np.ones((self.n_components, n_features))
self.scale_ = np.ones((self.n_components, n_features))
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * (np.sum(digamma(self.dof_) -
np.log(self.scale_), 1))
self.bound_prec_ -= 0.5 * np.sum(self.precs_, 1)
elif self.covariance_type == 'tied':
self.dof_ = 1.
self.scale_ = np.identity(n_features)
self.precs_ = np.identity(n_features)
self.det_scale_ = 1.
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
self.dof_ = (1 + self.n_components + n_samples)
self.dof_ *= np.ones(self.n_components)
self.scale_ = [2 * np.identity(n_features)
for _ in range(self.n_components)]
self.precs_ = [np.identity(n_features)
for _ in range(self.n_components)]
self.det_scale_ = np.ones(self.n_components)
self.bound_prec_ = np.zeros(self.n_components)
for k in range(self.n_components):
self.bound_prec_[k] = wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= (self.dof_[k] *
np.trace(self.scale_[k]))
self.bound_prec_ *= 0.5
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
prev_log_likelihood = current_log_likelihood
# Expectation step
curr_logprob, z = self.score_samples(X)
current_log_likelihood = (
curr_logprob.mean() + self._logprior(z) / n_samples)
# Check for convergence.
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if change < self.tol:
self.converged_ = True
break
# Maximization step
self._do_mstep(X, z, self.params)
if self.n_iter == 0:
# Need to make sure that there is a z value to output
# Output zeros because it was just a quick initialization
z = np.zeros((X.shape[0], self.n_components))
self._set_weights()
return z
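# Illustrative usage sketch (an assumption, not part of scikit-learn): the concrete
# DPGMM/VBGMM classes below inherit fit()/predict() from the GMM base class, which
# delegates to the _fit() implementation above; a typical call fits the truncated
# model and returns hard component assignments.
def _example_fit_dpgmm(X, n_components=5):
    """Hypothetical helper: fit a DPGMM on data X and return component labels."""
    model = DPGMM(n_components=n_components, covariance_type='diag', n_iter=100)
    model.fit(X)            # runs the variational EM loop implemented in _fit()
    return model.predict(X)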
@deprecated("The DPGMM class is not working correctly and it's better "
"to not use it. DPGMM is deprecated in 0.18 and "
"will be removed in 0.20.")
class DPGMM(_DPGMMBase):
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0, min_covar=None,
n_iter=10, params='wmc', init_params='wmc'):
super(DPGMM, self).__init__(
n_components=n_components, covariance_type=covariance_type,
alpha=alpha, random_state=random_state, tol=tol, verbose=verbose,
min_covar=min_covar, n_iter=n_iter, params=params,
init_params=init_params)
@deprecated("The VBGMM class is not working correctly and it's better "
"to not use it. VBGMM is deprecated in 0.18 and "
"will be removed in 0.20.")
class VBGMM(_DPGMMBase):
"""Variational Inference for the Gaussian Mixture Model
Variational inference for a Gaussian mixture model probability
distribution. This class allows for easy and efficient inference
of an approximate posterior distribution over the parameters of a
Gaussian mixture model with a fixed number of components.
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Read more in the :ref:`User Guide <vbgmm>`.
Parameters
----------
n_components: int, default 1
Number of mixture components.
covariance_type: string, default 'diag'
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
alpha: float, default 1
Real number representing the concentration parameter of
the dirichlet distribution. Intuitively, the higher the
value of alpha the more likely the variational mixture of
Gaussians model will use all components it can.
tol : float, default 1e-3
Convergence threshold.
n_iter : int, default 10
Maximum number of iterations to perform before convergence.
params : string, default 'wmc'
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars.
init_params : string, default 'wmc'
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default 0
Controls output verbosity.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussians.
n_components : int (read-only)
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
            (`n_components`, `n_features`) if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False
otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
DPGMM : Infinite Gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0,
min_covar=None, n_iter=10, params='wmc', init_params='wmc'):
super(VBGMM, self).__init__(
n_components, covariance_type, random_state=random_state,
tol=tol, verbose=verbose, min_covar=min_covar,
n_iter=n_iter, params=params, init_params=init_params)
self.alpha = alpha
def _fit(self, X, y=None):
"""Estimate model parameters with the variational algorithm.
For a full derivation and description of the algorithm see
doc/modules/dp-derivation.rst
or
http://scikit-learn.org/stable/modules/dp-derivation.html
        An initialization step is performed before entering the EM
        algorithm. If you want to avoid this step, set the keyword
        argument init_params to the empty string '' when creating
        the object. Likewise, if you would just like to do an
        initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
self.alpha_ = float(self.alpha) / self.n_components
return super(VBGMM, self)._fit(X, y)
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities: array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'gamma_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
dg = digamma(self.gamma_) - digamma(np.sum(self.gamma_))
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dg
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
for i in range(self.n_components):
self.gamma_[i] = self.alpha_ + np.sum(z.T[i])
def _initialize_gamma(self):
self.gamma_ = self.alpha_ * np.ones(self.n_components)
def _bound_proportions(self, z):
logprior = 0.
dg = digamma(self.gamma_)
dg -= digamma(np.sum(self.gamma_))
logprior += np.sum(dg.reshape((-1, 1)) * z.T)
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _bound_concentration(self):
logprior = 0.
logprior = gammaln(np.sum(self.gamma_)) - gammaln(self.n_components
* self.alpha_)
logprior -= np.sum(gammaln(self.gamma_) - gammaln(self.alpha_))
sg = digamma(np.sum(self.gamma_))
logprior += np.sum((self.gamma_ - self.alpha_)
* (digamma(self.gamma_) - sg))
return logprior
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose > 0:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_)
print("covariance_type:", self.covariance_type)
def _set_weights(self):
self.weights_[:] = self.gamma_
self.weights_ /= np.sum(self.weights_)
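# A hedged usage sketch, not part of scikit-learn: fit the variational mixture
# defined above on two blobs of random 2-D data and inspect the learned mixing
# weights. The class and attribute names come from this module; the data and
# hyper-parameters below are illustrative only.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X_demo = np.vstack([rng.randn(200, 2), rng.randn(200, 2) + 5.0])

    model = VBGMM(n_components=5, alpha=1.0, n_iter=100)
    model.fit(X_demo)

    print("converged:", model.converged_)
    print("weights:", np.round(model.weights_, 3))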
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and Contributors
# License: MIT. See LICENSE
import frappe
import unittest
import functools
import ldap3
import ssl
import os
from unittest import mock
from frappe.integrations.doctype.ldap_settings.ldap_settings import LDAPSettings
from ldap3 import Server, Connection, MOCK_SYNC, OFFLINE_SLAPD_2_4, OFFLINE_AD_2012_R2
class LDAP_TestCase():
TEST_LDAP_SERVER = None # must match the 'LDAP Settings' field option
TEST_LDAP_SEARCH_STRING = None
LDAP_USERNAME_FIELD = None
DOCUMENT_GROUP_MAPPINGS = []
LDAP_SCHEMA = None
LDAP_LDIF_JSON = None
TEST_VALUES_LDAP_COMPLEX_SEARCH_STRING = None
def mock_ldap_connection(f):
@functools.wraps(f)
def wrapped(self, *args, **kwargs):
with mock.patch('frappe.integrations.doctype.ldap_settings.ldap_settings.LDAPSettings.connect_to_ldap') as mock_connection:
mock_connection.return_value = self.connection
self.test_class = LDAPSettings(self.doc)
# Create a clean doc
localdoc = self.doc.copy()
frappe.get_doc(localdoc).save()
rv = f(self, *args, **kwargs)
# Clean-up
self.test_class = None
return rv
return wrapped
def clean_test_users():
try: # clean up test user 1
frappe.get_doc("User", 'posix.user1@unit.testing').delete()
except Exception:
pass
try: # clean up test user 2
frappe.get_doc("User", 'posix.user2@unit.testing').delete()
except Exception:
pass
@classmethod
def setUpClass(self, ldapServer='OpenLDAP'):
self.clean_test_users()
# Save user data for restoration in tearDownClass()
self.user_ldap_settings = frappe.get_doc('LDAP Settings')
# Create test user1
self.user1doc = {
'username': 'posix.user',
'email': 'posix.user1@unit.testing',
'first_name': 'posix'
}
self.user1doc.update({
"doctype": "User",
"send_welcome_email": 0,
"language": "",
"user_type": "System User",
})
user = frappe.get_doc(self.user1doc)
user.insert(ignore_permissions=True)
		# Create test user2
self.user2doc = {
'username': 'posix.user2',
'email': 'posix.user2@unit.testing',
'first_name': 'posix'
}
self.user2doc.update({
"doctype": "User",
"send_welcome_email": 0,
"language": "",
"user_type": "System User",
})
user = frappe.get_doc(self.user2doc)
user.insert(ignore_permissions=True)
# Setup Mock OpenLDAP Directory
self.ldap_dc_path = 'dc=unit,dc=testing'
self.ldap_user_path = 'ou=users,' + self.ldap_dc_path
self.ldap_group_path = 'ou=groups,' + self.ldap_dc_path
self.base_dn = 'cn=base_dn_user,' + self.ldap_dc_path
self.base_password = 'my_password'
self.ldap_server = 'ldap://my_fake_server:389'
self.doc = {
"doctype": "LDAP Settings",
"enabled": True,
"ldap_directory_server": self.TEST_LDAP_SERVER,
"ldap_server_url": self.ldap_server,
"base_dn": self.base_dn,
"password": self.base_password,
"ldap_search_path_user": self.ldap_user_path,
"ldap_search_string": self.TEST_LDAP_SEARCH_STRING,
"ldap_search_path_group": self.ldap_group_path,
"ldap_user_creation_and_mapping_section": '',
"ldap_email_field": 'mail',
"ldap_username_field": self.LDAP_USERNAME_FIELD,
"ldap_first_name_field": 'givenname',
"ldap_middle_name_field": '',
"ldap_last_name_field": 'sn',
"ldap_phone_field": 'telephonenumber',
"ldap_mobile_field": 'mobile',
"ldap_security": '',
"ssl_tls_mode": '',
"require_trusted_certificate": 'No',
"local_private_key_file": '',
"local_server_certificate_file": '',
"local_ca_certs_file": '',
"ldap_group_objectclass": '',
"ldap_group_member_attribute": '',
"default_role": 'Newsletter Manager',
"ldap_groups": self.DOCUMENT_GROUP_MAPPINGS,
"ldap_group_field": ''}
self.server = Server(host=self.ldap_server, port=389, get_info=self.LDAP_SCHEMA)
self.connection = Connection(
self.server,
user=self.base_dn,
password=self.base_password,
read_only=True,
client_strategy=MOCK_SYNC)
self.connection.strategy.entries_from_json(os.path.abspath(os.path.dirname(__file__)) + '/' + self.LDAP_LDIF_JSON)
self.connection.bind()
@classmethod
def tearDownClass(self):
try:
frappe.get_doc('LDAP Settings').delete()
except Exception:
pass
try:
# return doc back to user data
self.user_ldap_settings.save()
except Exception:
pass
# Clean-up test users
self.clean_test_users()
# Clear OpenLDAP connection
self.connection = None
@mock_ldap_connection
def test_mandatory_fields(self):
mandatory_fields = [
'ldap_server_url',
'ldap_directory_server',
'base_dn',
'password',
'ldap_search_path_user',
'ldap_search_path_group',
'ldap_search_string',
'ldap_email_field',
'ldap_username_field',
'ldap_first_name_field',
'require_trusted_certificate',
'default_role'
		] # fields required for LDAP to function need to be mandatory
for mandatory_field in mandatory_fields:
localdoc = self.doc.copy()
localdoc[mandatory_field] = ''
try:
frappe.get_doc(localdoc).save()
self.fail('Document LDAP Settings field [{0}] is not mandatory'.format(mandatory_field))
except frappe.exceptions.MandatoryError:
pass
except frappe.exceptions.ValidationError:
if mandatory_field == 'ldap_search_string':
# additional validation is done on this field, pass in this instance
pass
for non_mandatory_field in self.doc: # Ensure remaining fields have not been made mandatory
if non_mandatory_field == 'doctype' or non_mandatory_field in mandatory_fields:
continue
localdoc = self.doc.copy()
localdoc[non_mandatory_field] = ''
try:
frappe.get_doc(localdoc).save()
except frappe.exceptions.MandatoryError:
self.fail('Document LDAP Settings field [{0}] should not be mandatory'.format(non_mandatory_field))
@mock_ldap_connection
def test_validation_ldap_search_string(self):
invalid_ldap_search_strings = [
'',
'uid={0}',
'(uid={0}',
'uid={0})',
'(&(objectclass=posixgroup)(uid={0})',
'&(objectclass=posixgroup)(uid={0}))',
'(uid=no_placeholder)'
		] # the LDAP search string must be enclosed in '()' and contain the same number of opening and closing brackets for the user search to work
for invalid_search_string in invalid_ldap_search_strings:
localdoc = self.doc.copy()
localdoc['ldap_search_string'] = invalid_search_string
try:
frappe.get_doc(localdoc).save()
self.fail("LDAP search string [{0}] should not validate".format(invalid_search_string))
except frappe.exceptions.ValidationError:
pass
def test_connect_to_ldap(self):
		# setup a clean doc with ldap disabled so no validation occurs (this is tested separately)
local_doc = self.doc.copy()
local_doc['enabled'] = False
self.test_class = LDAPSettings(self.doc)
with mock.patch('ldap3.Server') as ldap3_server_method:
with mock.patch('ldap3.Connection') as ldap3_connection_method:
ldap3_connection_method.return_value = self.connection
with mock.patch('ldap3.Tls') as ldap3_Tls_method:
function_return = self.test_class.connect_to_ldap(base_dn=self.base_dn, password=self.base_password)
args, kwargs = ldap3_connection_method.call_args
prevent_connection_parameters = {
						# prevent these parameters, either for security reasons or because the end user has no way to configure them
'mode': {
'IP_V4_ONLY': 'Locks the user to IPv4 without frappe providing a way to configure',
'IP_V6_ONLY': 'Locks the user to IPv6 without frappe providing a way to configure'
},
'auto_bind': {
'NONE': 'ldap3.Connection must autobind with base_dn',
'NO_TLS': 'ldap3.Connection must have TLS',
'TLS_AFTER_BIND': '[Security] ldap3.Connection TLS bind must occur before bind'
}
}
for connection_arg in kwargs:
if connection_arg in prevent_connection_parameters and \
kwargs[connection_arg] in prevent_connection_parameters[connection_arg]:
self.fail('ldap3.Connection was called with {0}, failed reason: [{1}]'.format(
kwargs[connection_arg],
prevent_connection_parameters[connection_arg][kwargs[connection_arg]]))
if local_doc['require_trusted_certificate'] == 'Yes':
tls_validate = ssl.CERT_REQUIRED
tls_version = ssl.PROTOCOL_TLS_CLIENT
tls_configuration = ldap3.Tls(validate=tls_validate, version=tls_version)
self.assertTrue(kwargs['auto_bind'] == ldap3.AUTO_BIND_TLS_BEFORE_BIND,
'Security: [ldap3.Connection] autobind TLS before bind with value ldap3.AUTO_BIND_TLS_BEFORE_BIND')
else:
tls_validate = ssl.CERT_NONE
tls_version = ssl.PROTOCOL_TLS_CLIENT
tls_configuration = ldap3.Tls(validate=tls_validate, version=tls_version)
self.assertTrue(kwargs['auto_bind'],
'ldap3.Connection must autobind')
ldap3_Tls_method.assert_called_with(validate=tls_validate, version=tls_version)
ldap3_server_method.assert_called_with(host=self.doc['ldap_server_url'], tls=tls_configuration)
self.assertTrue(kwargs['password'] == self.base_password,
'ldap3.Connection password does not match provided password')
self.assertTrue(kwargs['raise_exceptions'],
'ldap3.Connection must raise exceptions for error handling')
self.assertTrue(kwargs['user'] == self.base_dn,
'ldap3.Connection user does not match provided user')
ldap3_connection_method.assert_called_with(server=ldap3_server_method.return_value,
auto_bind=True,
password=self.base_password,
raise_exceptions=True,
read_only=True,
user=self.base_dn)
self.assertTrue(type(function_return) is ldap3.core.connection.Connection,
'The return type must be of ldap3.Connection')
function_return = self.test_class.connect_to_ldap(base_dn=self.base_dn, password=self.base_password, read_only=False)
args, kwargs = ldap3_connection_method.call_args
self.assertFalse(kwargs['read_only'], 'connect_to_ldap() read_only parameter supplied as False but does not match the ldap3.Connection() read_only named parameter')
@mock_ldap_connection
def test_get_ldap_client_settings(self):
result = self.test_class.get_ldap_client_settings()
self.assertIsInstance(result, dict)
self.assertTrue(result['enabled'] == self.doc['enabled']) # settings should match doc
localdoc = self.doc.copy()
localdoc['enabled'] = False
frappe.get_doc(localdoc).save()
result = self.test_class.get_ldap_client_settings()
self.assertFalse(result['enabled']) # must match the edited doc
@mock_ldap_connection
def test_update_user_fields(self):
test_user_data = {
'username': 'posix.user',
'email': 'posix.user1@unit.testing',
'first_name': 'posix',
'middle_name': 'another',
'last_name': 'user',
'phone': '08 1234 5678',
'mobile_no': '0421 123 456'
}
test_user = frappe.get_doc("User", test_user_data['email'])
self.test_class.update_user_fields(test_user, test_user_data)
updated_user = frappe.get_doc("User", test_user_data['email'])
self.assertTrue(updated_user.middle_name == test_user_data['middle_name'])
self.assertTrue(updated_user.last_name == test_user_data['last_name'])
self.assertTrue(updated_user.phone == test_user_data['phone'])
self.assertTrue(updated_user.mobile_no == test_user_data['mobile_no'])
@mock_ldap_connection
def test_sync_roles(self):
if self.TEST_LDAP_SERVER.lower() == 'openldap':
test_user_data = {
'posix.user1': ['Users', 'Administrators', 'default_role', 'frappe_default_all','frappe_default_guest'],
'posix.user2': ['Users', 'Group3', 'default_role', 'frappe_default_all', 'frappe_default_guest']
}
elif self.TEST_LDAP_SERVER.lower() == 'active directory':
test_user_data = {
'posix.user1': ['Domain Users', 'Domain Administrators', 'default_role', 'frappe_default_all','frappe_default_guest'],
'posix.user2': ['Domain Users', 'Enterprise Administrators', 'default_role', 'frappe_default_all', 'frappe_default_guest']
}
role_to_group_map = {
self.doc['ldap_groups'][0]['erpnext_role']: self.doc['ldap_groups'][0]['ldap_group'],
self.doc['ldap_groups'][1]['erpnext_role']: self.doc['ldap_groups'][1]['ldap_group'],
self.doc['ldap_groups'][2]['erpnext_role']: self.doc['ldap_groups'][2]['ldap_group'],
'Newsletter Manager': 'default_role',
'All': 'frappe_default_all',
'Guest': 'frappe_default_guest',
}
# re-create user1 to ensure clean
frappe.get_doc("User", 'posix.user1@unit.testing').delete()
user = frappe.get_doc(self.user1doc)
user.insert(ignore_permissions=True)
for test_user in test_user_data:
test_user_doc = frappe.get_doc("User", test_user + '@unit.testing')
test_user_roles = frappe.get_roles(test_user + '@unit.testing')
self.assertTrue(len(test_user_roles) == 2,
'User should only be a part of the All and Guest roles') # check default frappe roles
self.test_class.sync_roles(test_user_doc, test_user_data[test_user]) # update user roles
frappe.get_doc("User", test_user + '@unit.testing')
updated_user_roles = frappe.get_roles(test_user + '@unit.testing')
self.assertTrue(len(updated_user_roles) == len(test_user_data[test_user]),
'syncing of the user roles failed. {0} != {1} for user {2}'.format(len(updated_user_roles), len(test_user_data[test_user]), test_user))
for user_role in updated_user_roles: # match each users role mapped to ldap groups
self.assertTrue(role_to_group_map[user_role] in test_user_data[test_user],
				'during sync_roles(), the user was given role {0} which should not have occurred'.format(user_role))
@mock_ldap_connection
def test_create_or_update_user(self):
test_user_data = {
'posix.user1': ['Users', 'Administrators', 'default_role', 'frappe_default_all','frappe_default_guest'],
}
test_user = 'posix.user1'
frappe.get_doc("User", test_user + '@unit.testing').delete() # remove user 1
with self.assertRaises(frappe.exceptions.DoesNotExistError): # ensure user deleted so function can be tested
frappe.get_doc("User", test_user + '@unit.testing')
with mock.patch('frappe.integrations.doctype.ldap_settings.ldap_settings.LDAPSettings.update_user_fields') \
as update_user_fields_method:
update_user_fields_method.return_value = None
with mock.patch('frappe.integrations.doctype.ldap_settings.ldap_settings.LDAPSettings.sync_roles') as sync_roles_method:
sync_roles_method.return_value = None
# New user
self.test_class.create_or_update_user(self.user1doc, test_user_data[test_user])
self.assertTrue(sync_roles_method.called, 'User roles need to be updated for a new user')
self.assertFalse(update_user_fields_method.called,
'User roles are not required to be updated for a new user, this will occur during logon')
# Existing user
self.test_class.create_or_update_user(self.user1doc, test_user_data[test_user])
self.assertTrue(sync_roles_method.called, 'User roles need to be updated for an existing user')
self.assertTrue(update_user_fields_method.called, 'User fields need to be updated for an existing user')
@mock_ldap_connection
def test_get_ldap_attributes(self):
method_return = self.test_class.get_ldap_attributes()
self.assertTrue(type(method_return) is list)
@mock_ldap_connection
def test_fetch_ldap_groups(self):
if self.TEST_LDAP_SERVER.lower() == 'openldap':
test_users = {
'posix.user': ['Users', 'Administrators'],
'posix.user2': ['Users', 'Group3']
}
elif self.TEST_LDAP_SERVER.lower() == 'active directory':
test_users = {
'posix.user': ['Domain Users', 'Domain Administrators'],
'posix.user2': ['Domain Users', 'Enterprise Administrators']
}
for test_user in test_users:
self.connection.search(
search_base=self.ldap_user_path,
search_filter=self.TEST_LDAP_SEARCH_STRING.format(test_user),
attributes=self.test_class.get_ldap_attributes())
method_return = self.test_class.fetch_ldap_groups(self.connection.entries[0], self.connection)
self.assertIsInstance(method_return, list)
self.assertTrue(len(method_return) == len(test_users[test_user]))
for returned_group in method_return:
self.assertTrue(returned_group in test_users[test_user])
@mock_ldap_connection
def test_authenticate(self):
with mock.patch('frappe.integrations.doctype.ldap_settings.ldap_settings.LDAPSettings.fetch_ldap_groups') as \
fetch_ldap_groups_function:
fetch_ldap_groups_function.return_value = None
self.assertTrue(self.test_class.authenticate('posix.user', 'posix_user_password'))
self.assertTrue(fetch_ldap_groups_function.called,
'As part of authentication function fetch_ldap_groups_function needs to be called')
invalid_users = [
{'prefix_posix.user': 'posix_user_password'},
{'posix.user_postfix': 'posix_user_password'},
{'posix.user': 'posix_user_password_postfix'},
{'posix.user': 'prefix_posix_user_password'},
{'posix.user': ''},
{'': 'posix_user_password'},
{'': ''}
] # All invalid users should return 'invalid username or password'
			for invalid_user in invalid_users:
				for username, password in invalid_user.items():
					with self.assertRaises(frappe.exceptions.ValidationError) as display_message:
						self.test_class.authenticate(username, password)
					self.assertTrue(str(display_message.exception).lower() == 'invalid username or password',
						'invalid credentials passed authentication [user: {0}, password: {1}]'.format(username, password))
@mock_ldap_connection
def test_complex_ldap_search_filter(self):
ldap_search_filters = self.TEST_VALUES_LDAP_COMPLEX_SEARCH_STRING
for search_filter in ldap_search_filters:
self.test_class.ldap_search_string = search_filter
			if 'ACCESS:test3' in search_filter:  # posix.user's ldap description does not contain this string, so authentication should fail
				with self.assertRaises(frappe.exceptions.ValidationError) as display_message:
					self.test_class.authenticate('posix.user', 'posix_user_password')
				self.assertTrue(str(display_message.exception).lower() == 'invalid username or password')
else:
self.assertTrue(self.test_class.authenticate('posix.user', 'posix_user_password'))
def test_reset_password(self):
self.test_class = LDAPSettings(self.doc)
# Create a clean doc
localdoc = self.doc.copy()
localdoc['enabled'] = False
frappe.get_doc(localdoc).save()
with mock.patch('frappe.integrations.doctype.ldap_settings.ldap_settings.LDAPSettings.connect_to_ldap') as connect_to_ldap:
connect_to_ldap.return_value = self.connection
with self.assertRaises(frappe.exceptions.ValidationError) as validation: # Fail if username string used
self.test_class.reset_password('posix.user', 'posix_user_password')
self.assertTrue(str(validation.exception) == 'No LDAP User found for email: posix.user')
try:
self.test_class.reset_password('posix.user1@unit.testing', 'posix_user_password') # Change Password
except Exception: # An exception from the tested class is ok, as long as the connection to LDAP was made writeable
pass
connect_to_ldap.assert_called_with(self.base_dn, self.base_password, read_only=False)
@mock_ldap_connection
def test_convert_ldap_entry_to_dict(self):
self.connection.search(
search_base=self.ldap_user_path,
search_filter=self.TEST_LDAP_SEARCH_STRING.format("posix.user"),
attributes=self.test_class.get_ldap_attributes())
test_ldap_entry = self.connection.entries[0]
method_return = self.test_class.convert_ldap_entry_to_dict(test_ldap_entry)
self.assertTrue(type(method_return) is dict) # must be dict
self.assertTrue(len(method_return) == 6) # there are 6 fields in mock_ldap for use
class Test_OpenLDAP(LDAP_TestCase, unittest.TestCase):
TEST_LDAP_SERVER = 'OpenLDAP'
TEST_LDAP_SEARCH_STRING = '(uid={0})'
DOCUMENT_GROUP_MAPPINGS = [
{
"doctype": "LDAP Group Mapping",
"ldap_group": "Administrators",
"erpnext_role": "System Manager"
},
{
"doctype": "LDAP Group Mapping",
"ldap_group": "Users",
"erpnext_role": "Blogger"
},
{
"doctype": "LDAP Group Mapping",
"ldap_group": "Group3",
"erpnext_role": "Accounts User"
}
]
LDAP_USERNAME_FIELD = 'uid'
LDAP_SCHEMA = OFFLINE_SLAPD_2_4
LDAP_LDIF_JSON = 'test_data_ldif_openldap.json'
TEST_VALUES_LDAP_COMPLEX_SEARCH_STRING = [
'(uid={0})',
'(&(objectclass=posixaccount)(uid={0}))',
		'(&(description=*ACCESS:test1*)(uid={0}))',  # OpenLDAP has no 'memberOf' here, so the description field is used to filter; posix.user carries this value (the equivalent of AD 'memberOf')
		'(&(objectclass=posixaccount)(description=*ACCESS:test3*)(uid={0}))'  # posix.user does not carry this value, so the filter should exclude it (the equivalent of AD 'memberOf')
]
class Test_ActiveDirectory(LDAP_TestCase, unittest.TestCase):
TEST_LDAP_SERVER = 'Active Directory'
TEST_LDAP_SEARCH_STRING = '(samaccountname={0})'
DOCUMENT_GROUP_MAPPINGS = [
{
"doctype": "LDAP Group Mapping",
"ldap_group": "Domain Administrators",
"erpnext_role": "System Manager"
},
{
"doctype": "LDAP Group Mapping",
"ldap_group": "Domain Users",
"erpnext_role": "Blogger"
},
{
"doctype": "LDAP Group Mapping",
"ldap_group": "Enterprise Administrators",
"erpnext_role": "Accounts User"
}
]
LDAP_USERNAME_FIELD = 'samaccountname'
LDAP_SCHEMA = OFFLINE_AD_2012_R2
LDAP_LDIF_JSON = 'test_data_ldif_activedirectory.json'
TEST_VALUES_LDAP_COMPLEX_SEARCH_STRING = [
'(samaccountname={0})',
'(&(objectclass=user)(samaccountname={0}))',
		'(&(description=*ACCESS:test1*)(samaccountname={0}))',  # as with the OpenLDAP tests, the description field stands in for 'memberOf'; posix.user carries this value
		'(&(objectclass=user)(description=*ACCESS:test3*)(samaccountname={0}))'  # posix.user does not carry this value, so the filter should exclude it
]
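if __name__ == "__main__":
	# Hedged sketch, not part of the original test module: these tests are
	# normally collected by frappe's bench test runner, but the classes above
	# are plain unittest.TestCase subclasses, so a __main__ guard can also run
	# them directly. Note that the '{0}' placeholder in TEST_LDAP_SEARCH_STRING
	# is filled with the login name before the search is issued, e.g.
	# '(uid={0})'.format('posix.user') -> '(uid=posix.user)'.
	unittest.main()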
|
|
from six import string_types, iteritems
import tensorflow as tf
import numpy as np
import cv2
import os
def detect_faces(img, mtcnn):
margin = 44
image_size = 160
img_size = np.asarray(img.shape)[0:2]
bounding_boxes, landmarks = detect_face(img, mtcnn["pnet"], mtcnn["rnet"], mtcnn["onet"])
nrof_bb = bounding_boxes.shape[0]
padded_bounding_boxes = []
face_patches = []
if nrof_bb > 0:
landmarks = np.stack(landmarks)
landmarks = np.transpose(landmarks, (1, 0))
for i in range(nrof_bb):
det = np.squeeze(bounding_boxes[i, 0:4])
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0] - margin / 2, 0)
bb[1] = np.maximum(det[1] - margin / 2, 0)
bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
cropped = img[bb[1] : bb[3], bb[0] : bb[2], :]
aligned = cv2.resize(cropped, (image_size, image_size))
prewhitened = prewhiten(aligned)
padded_bounding_boxes.append(bb)
face_patches.append(prewhitened)
return face_patches, padded_bounding_boxes, landmarks
def prewhiten(x):
mean = np.mean(x)
std = np.std(x)
std_adj = np.maximum(std, 1.0 / np.sqrt(x.size))
y = np.multiply(np.subtract(x, mean), 1 / std_adj)
return y
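# A hedged illustration, not part of the original module: prewhiten()
# standardises a face patch to roughly zero mean and unit variance, flooring
# the standard deviation at 1/sqrt(n_pixels) so a constant patch cannot cause
# a division by zero. The helper below only exercises that behaviour on random
# data and is not used by the detection pipeline.
def _prewhiten_demo():
    rng = np.random.RandomState(0)
    patch = rng.randint(0, 256, size=(160, 160, 3)).astype(np.float32)
    out = prewhiten(patch)
    # For a non-degenerate patch the result has mean ~0 and std ~1.
    return float(out.mean()), float(out.std())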
def imresample(img, sz):
im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA)
return im_data
def generateBoundingBox(imap, reg, scale, t):
# use heatmap to generate bounding boxes
stride = 2
cellsize = 12
imap = np.transpose(imap)
dx1 = np.transpose(reg[:, :, 0])
dy1 = np.transpose(reg[:, :, 1])
dx2 = np.transpose(reg[:, :, 2])
dy2 = np.transpose(reg[:, :, 3])
y, x = np.where(imap >= t)
if y.shape[0] == 1:
dx1 = np.flipud(dx1)
dy1 = np.flipud(dy1)
dx2 = np.flipud(dx2)
dy2 = np.flipud(dy2)
score = imap[(y, x)]
reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]]))
if reg.size == 0:
reg = np.empty((0, 3))
bb = np.transpose(np.vstack([y, x]))
q1 = np.fix((stride * bb + 1) / scale)
q2 = np.fix((stride * bb + cellsize - 1 + 1) / scale)
boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg])
return boundingbox, reg
def nms(boxes, threshold, method):
if boxes.size == 0:
return np.empty((0, 3))
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
s = boxes[:, 4]
area = (x2 - x1 + 1) * (y2 - y1 + 1)
I = np.argsort(s)
pick = np.zeros_like(s, dtype=np.int16)
counter = 0
while I.size > 0:
i = I[-1]
pick[counter] = i
counter += 1
idx = I[0:-1]
xx1 = np.maximum(x1[i], x1[idx])
yy1 = np.maximum(y1[i], y1[idx])
xx2 = np.minimum(x2[i], x2[idx])
yy2 = np.minimum(y2[i], y2[idx])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
        if method == "Min":
o = inter / np.minimum(area[i], area[idx])
else:
o = inter / (area[i] + area[idx] - inter)
I = I[np.where(o <= threshold)]
pick = pick[0:counter]
return pick
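# A hedged illustration, not part of the original module: nms() expects rows of
# [x1, y1, x2, y2, score] and returns the indices of the boxes that survive
# suppression, comparing overlaps against `threshold` with either "Union"
# (intersection over union) or "Min" (intersection over the smaller box).
def _nms_demo():
    boxes = np.array([
        [10.0, 10.0, 50.0, 50.0, 0.9],
        [12.0, 12.0, 52.0, 52.0, 0.8],      # heavily overlaps the first box
        [100.0, 100.0, 140.0, 140.0, 0.7],
    ])
    # Expected to keep indices 0 and 2 and suppress the duplicate box 1.
    return nms(boxes, 0.5, "Union")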
def rerec(bboxA):
# convert bboxA to square
h = bboxA[:, 3] - bboxA[:, 1]
w = bboxA[:, 2] - bboxA[:, 0]
l = np.maximum(w, h)
bboxA[:, 0] = bboxA[:, 0] + w * 0.5 - l * 0.5
bboxA[:, 1] = bboxA[:, 1] + h * 0.5 - l * 0.5
bboxA[:, 2:4] = bboxA[:, 0:2] + np.transpose(np.tile(l, (2, 1)))
return bboxA
def pad(total_boxes, w, h):
# compute the padding coordinates (pad the bounding boxes to square)
tmpw = (total_boxes[:, 2] - total_boxes[:, 0] + 1).astype(np.int32)
tmph = (total_boxes[:, 3] - total_boxes[:, 1] + 1).astype(np.int32)
numbox = total_boxes.shape[0]
dx = np.ones((numbox), dtype=np.int32)
dy = np.ones((numbox), dtype=np.int32)
edx = tmpw.copy().astype(np.int32)
edy = tmph.copy().astype(np.int32)
x = total_boxes[:, 0].copy().astype(np.int32)
y = total_boxes[:, 1].copy().astype(np.int32)
ex = total_boxes[:, 2].copy().astype(np.int32)
ey = total_boxes[:, 3].copy().astype(np.int32)
tmp = np.where(ex > w)
edx.flat[tmp] = np.expand_dims(-ex[tmp] + w + tmpw[tmp], 1)
ex[tmp] = w
tmp = np.where(ey > h)
edy.flat[tmp] = np.expand_dims(-ey[tmp] + h + tmph[tmp], 1)
ey[tmp] = h
tmp = np.where(x < 1)
dx.flat[tmp] = np.expand_dims(2 - x[tmp], 1)
x[tmp] = 1
tmp = np.where(y < 1)
dy.flat[tmp] = np.expand_dims(2 - y[tmp], 1)
y[tmp] = 1
return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph
def bbreg(boundingbox, reg):
# calibrate bounding boxes
if reg.shape[1] == 1:
reg = np.reshape(reg, (reg.shape[2], reg.shape[3]))
w = boundingbox[:, 2] - boundingbox[:, 0] + 1
h = boundingbox[:, 3] - boundingbox[:, 1] + 1
b1 = boundingbox[:, 0] + reg[:, 0] * w
b2 = boundingbox[:, 1] + reg[:, 1] * h
b3 = boundingbox[:, 2] + reg[:, 2] * w
b4 = boundingbox[:, 3] + reg[:, 3] * h
boundingbox[:, 0:4] = np.transpose(np.vstack([b1, b2, b3, b4]))
return boundingbox
def layer(op):
def layer_decorated(self, *args, **kwargs):
# Automatically set a name if not provided.
name = kwargs.setdefault("name", self.get_unique_name(op.__name__))
# Figure out the layer inputs.
if len(self.terminals) == 0:
raise RuntimeError("No input variables found for layer %s." % name)
elif len(self.terminals) == 1:
layer_input = self.terminals[0]
else:
layer_input = list(self.terminals)
# Perform the operation and get the output.
layer_output = op(self, layer_input, *args, **kwargs)
# Add to layer LUT.
self.layers[name] = layer_output
# This output is now the input for the next layer.
self.feed(layer_output)
# Return self for chained calls.
return self
return layer_decorated
class Network(object):
def __init__(self, inputs, trainable=True):
# The input nodes for this network
self.inputs = inputs
# The current list of terminal nodes
self.terminals = []
# Mapping from layer names to layers
self.layers = dict(inputs)
# If true, the resulting variables are set as trainable
self.trainable = trainable
self.setup()
def setup(self):
"""Construct the network. """
raise NotImplementedError("Must be implemented by the subclass.")
def load(self, data_path, session, ignore_missing=False):
"""Load network weights.
data_path: The path to the numpy-serialized network weights
session: The current TensorFlow session
ignore_missing: If true, serialized weights for missing layers are ignored.
"""
data_dict = np.load(data_path, encoding="latin1", allow_pickle=True).item()
for op_name in data_dict:
with tf.variable_scope(op_name, reuse=True):
for param_name, data in iteritems(data_dict[op_name]):
try:
var = tf.get_variable(param_name)
session.run(var.assign(data))
except ValueError:
if not ignore_missing:
raise
def feed(self, *args):
"""Set the input(s) for the next operation by replacing the terminal nodes.
The arguments can be either layer names or the actual layers.
"""
assert len(args) != 0
self.terminals = []
for fed_layer in args:
if isinstance(fed_layer, string_types):
try:
fed_layer = self.layers[fed_layer]
except KeyError:
raise KeyError("Unknown layer name fed: %s" % fed_layer)
self.terminals.append(fed_layer)
return self
def get_output(self):
"""Returns the current network output."""
return self.terminals[-1]
def get_unique_name(self, prefix):
"""Returns an index-suffixed unique name for the given prefix.
This is used for auto-generating layer names based on the type-prefix.
"""
ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
return "%s_%d" % (prefix, ident)
def make_var(self, name, shape):
"""Creates a new TensorFlow variable."""
return tf.get_variable(name, shape, trainable=self.trainable)
def validate_padding(self, padding):
"""Verifies that the padding is one of the supported ones."""
assert padding in ("SAME", "VALID")
@layer
def conv(self, inp, k_h, k_w, c_o, s_h, s_w, name, relu=True, padding="SAME", group=1, biased=True):
# Verify that the padding is acceptable
self.validate_padding(padding)
# Get the number of channels in the input
c_i = int(inp.get_shape()[-1])
# Verify that the grouping parameter is valid
assert c_i % group == 0
assert c_o % group == 0
# Convolution for a given input and kernel
def convolve(i, k):
return tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
with tf.variable_scope(name) as scope:
kernel = self.make_var("weights", shape=[k_h, k_w, c_i // group, c_o])
# This is the common-case. Convolve the input without any further complications.
output = convolve(inp, kernel)
# Add the biases
if biased:
biases = self.make_var("biases", [c_o])
output = tf.nn.bias_add(output, biases)
if relu:
# ReLU non-linearity
output = tf.nn.relu(output, name=scope.name)
return output
@layer
def prelu(self, inp, name):
with tf.variable_scope(name):
i = int(inp.get_shape()[-1])
alpha = self.make_var("alpha", shape=(i,))
output = tf.nn.relu(inp) + tf.multiply(alpha, -tf.nn.relu(-inp))
return output
@layer
def max_pool(self, inp, k_h, k_w, s_h, s_w, name, padding="SAME"):
self.validate_padding(padding)
return tf.nn.max_pool(inp, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding, name=name)
@layer
def fc(self, inp, num_out, name, relu=True):
with tf.variable_scope(name):
input_shape = inp.get_shape()
if input_shape.ndims == 4:
# The input is spatial. Vectorize it first.
dim = 1
for d in input_shape[1:].as_list():
dim *= int(d)
feed_in = tf.reshape(inp, [-1, dim])
else:
feed_in, dim = (inp, input_shape[-1].value)
weights = self.make_var("weights", shape=[dim, num_out])
biases = self.make_var("biases", [num_out])
op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
fc = op(feed_in, weights, biases, name=name)
return fc
@layer
def softmax(self, target, axis, name=None):
max_axis = tf.reduce_max(target, axis, keep_dims=True)
target_exp = tf.exp(target - max_axis)
normalize = tf.reduce_sum(target_exp, axis, keep_dims=True)
softmax = tf.div(target_exp, normalize, name)
return softmax
class PNet(Network):
def setup(self):
(
self.feed("data")
.conv(3, 3, 10, 1, 1, padding="VALID", relu=False, name="conv1")
.prelu(name="PReLU1")
.max_pool(2, 2, 2, 2, name="pool1")
.conv(3, 3, 16, 1, 1, padding="VALID", relu=False, name="conv2")
.prelu(name="PReLU2")
.conv(3, 3, 32, 1, 1, padding="VALID", relu=False, name="conv3")
.prelu(name="PReLU3")
.conv(1, 1, 2, 1, 1, relu=False, name="conv4-1")
.softmax(3, name="prob1")
)
(self.feed("PReLU3").conv(1, 1, 4, 1, 1, relu=False, name="conv4-2"))
class RNet(Network):
def setup(self):
(
self.feed("data")
.conv(3, 3, 28, 1, 1, padding="VALID", relu=False, name="conv1")
.prelu(name="prelu1")
.max_pool(3, 3, 2, 2, name="pool1")
.conv(3, 3, 48, 1, 1, padding="VALID", relu=False, name="conv2")
.prelu(name="prelu2")
.max_pool(3, 3, 2, 2, padding="VALID", name="pool2")
.conv(2, 2, 64, 1, 1, padding="VALID", relu=False, name="conv3")
.prelu(name="prelu3")
.fc(128, relu=False, name="conv4")
.prelu(name="prelu4")
.fc(2, relu=False, name="conv5-1")
.softmax(1, name="prob1")
)
(self.feed("prelu4").fc(4, relu=False, name="conv5-2"))
class ONet(Network):
def setup(self):
(
self.feed("data")
.conv(3, 3, 32, 1, 1, padding="VALID", relu=False, name="conv1")
.prelu(name="prelu1")
.max_pool(3, 3, 2, 2, name="pool1")
.conv(3, 3, 64, 1, 1, padding="VALID", relu=False, name="conv2")
.prelu(name="prelu2")
.max_pool(3, 3, 2, 2, padding="VALID", name="pool2")
.conv(3, 3, 64, 1, 1, padding="VALID", relu=False, name="conv3")
.prelu(name="prelu3")
.max_pool(2, 2, 2, 2, name="pool3")
.conv(2, 2, 128, 1, 1, padding="VALID", relu=False, name="conv4")
.prelu(name="prelu4")
.fc(256, relu=False, name="conv5")
.prelu(name="prelu5")
.fc(2, relu=False, name="conv6-1")
.softmax(1, name="prob1")
)
(self.feed("prelu5").fc(4, relu=False, name="conv6-2"))
(self.feed("prelu5").fc(10, relu=False, name="conv6-3"))
def create_mtcnn(sess, model_path):
if not model_path:
model_path, _ = os.path.split(os.path.realpath(__file__))
with tf.variable_scope("pnet"):
data = tf.placeholder(tf.float32, (None, None, None, 3), "input")
pnet = PNet({"data": data})
pnet.load(os.path.join(model_path, "det1.npy"), sess)
with tf.variable_scope("rnet"):
data = tf.placeholder(tf.float32, (None, 24, 24, 3), "input")
rnet = RNet({"data": data})
rnet.load(os.path.join(model_path, "det2.npy"), sess)
with tf.variable_scope("onet"):
data = tf.placeholder(tf.float32, (None, 48, 48, 3), "input")
onet = ONet({"data": data})
onet.load(os.path.join(model_path, "det3.npy"), sess)
def pnet_fun(img):
return sess.run(("pnet/conv4-2/BiasAdd:0", "pnet/prob1:0"), feed_dict={"pnet/input:0": img})
def rnet_fun(img):
return sess.run(("rnet/conv5-2/conv5-2:0", "rnet/prob1:0"), feed_dict={"rnet/input:0": img})
def onet_fun(img):
return sess.run(
("onet/conv6-2/conv6-2:0", "onet/conv6-3/conv6-3:0", "onet/prob1:0"), feed_dict={"onet/input:0": img}
)
return {"pnet": pnet_fun, "rnet": rnet_fun, "onet": onet_fun}
def detect_face(img, pnet, rnet, onet):
minsize = 20 # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # thresholds for the three cascade stages
factor = 0.709 # scale factor
factor_count = 0
total_boxes = np.empty((0, 9))
points = []
h = img.shape[0]
w = img.shape[1]
minl = np.amin([h, w])
m = 12.0 / minsize
minl = minl * m
    # create scale pyramid
scales = []
while minl >= 12:
scales += [m * np.power(factor, factor_count)]
minl = minl * factor
factor_count += 1
# first stage
for j in range(len(scales)):
scale = scales[j]
hs = int(np.ceil(h * scale))
ws = int(np.ceil(w * scale))
im_data = imresample(img, (hs, ws))
im_data = (im_data - 127.5) * 0.0078125
img_x = np.expand_dims(im_data, 0)
img_y = np.transpose(img_x, (0, 2, 1, 3))
out = pnet(img_y)
out0 = np.transpose(out[0], (0, 2, 1, 3))
out1 = np.transpose(out[1], (0, 2, 1, 3))
boxes, _ = generateBoundingBox(out1[0, :, :, 1].copy(), out0[0, :, :, :].copy(), scale, threshold[0])
# inter-scale nms
pick = nms(boxes.copy(), 0.5, "Union")
if boxes.size > 0 and pick.size > 0:
boxes = boxes[pick, :]
total_boxes = np.append(total_boxes, boxes, axis=0)
numbox = total_boxes.shape[0]
if numbox > 0:
pick = nms(total_boxes.copy(), 0.7, "Union")
total_boxes = total_boxes[pick, :]
regw = total_boxes[:, 2] - total_boxes[:, 0]
regh = total_boxes[:, 3] - total_boxes[:, 1]
qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw
qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh
qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw
qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh
total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]]))
total_boxes = rerec(total_boxes.copy())
total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
numbox = total_boxes.shape[0]
if numbox > 0:
# second stage
tempimg = np.zeros((24, 24, 3, numbox))
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1 : edy[k], dx[k] - 1 : edx[k], :] = img[y[k] - 1 : ey[k], x[k] - 1 : ex[k], :]
            if (tmp.shape[0] > 0 and tmp.shape[1] > 0) or (tmp.shape[0] == 0 and tmp.shape[1] == 0):
tempimg[:, :, :, k] = imresample(tmp, (24, 24))
else:
                return np.empty((0, 5)), []  # abort: an invalid zero-sized crop was produced
tempimg = (tempimg - 127.5) * 0.0078125
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
out = rnet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[1, :]
ipass = np.where(score > threshold[1])
total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])
mv = out0[:, ipass[0]]
if total_boxes.shape[0] > 0:
pick = nms(total_boxes, 0.7, "Union")
total_boxes = total_boxes[pick, :]
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv[:, pick]))
total_boxes = rerec(total_boxes.copy())
numbox = total_boxes.shape[0]
if numbox > 0:
# third stage
total_boxes = np.fix(total_boxes).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
tempimg = np.zeros((48, 48, 3, numbox))
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1 : edy[k], dx[k] - 1 : edx[k], :] = img[y[k] - 1 : ey[k], x[k] - 1 : ex[k], :]
            if (tmp.shape[0] > 0 and tmp.shape[1] > 0) or (tmp.shape[0] == 0 and tmp.shape[1] == 0):
tempimg[:, :, :, k] = imresample(tmp, (48, 48))
else:
                return np.empty((0, 5)), []  # abort: an invalid zero-sized crop was produced
tempimg = (tempimg - 127.5) * 0.0078125
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
out = onet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1, :]
points = out1
ipass = np.where(score > threshold[2])
points = points[:, ipass[0]]
total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])
mv = out0[:, ipass[0]]
w = total_boxes[:, 2] - total_boxes[:, 0] + 1
h = total_boxes[:, 3] - total_boxes[:, 1] + 1
points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1
points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1
if total_boxes.shape[0] > 0:
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv))
pick = nms(total_boxes.copy(), 0.7, "Min")
total_boxes = total_boxes[pick, :]
points = points[:, pick]
return total_boxes, points
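if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: wire the three
    # cascaded networks together and run detection on a single image using the
    # TensorFlow 1.x session API that the rest of this file assumes. The image
    # path and the presence of det1.npy/det2.npy/det3.npy next to this file
    # are assumptions of this sketch only.
    with tf.Session() as sess:
        mtcnn = create_mtcnn(sess, None)
        image = cv2.imread("example.jpg")  # hypothetical input image
        if image is not None:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            faces, boxes, landmarks = detect_faces(image, mtcnn)
            print("detected %d face(s)" % len(faces))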
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate.changeset import UniqueConstraint
from migrate import ForeignKeyConstraint
from sqlalchemy import Boolean, BigInteger, Column, DateTime, Enum, Float
from sqlalchemy import dialects
from sqlalchemy import ForeignKey, Index, Integer, MetaData, String, Table
from sqlalchemy import Text
from sqlalchemy.types import NullType
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# Note on the autoincrement flag: this is defaulted for primary key columns
# of integral type, so is no longer set explicitly in such cases.
# NOTE(dprince): This wrapper allows us to easily match the Folsom MySQL
# Schema. In Folsom we created tables as latin1 and converted them to utf8
# later. This conversion causes some of the Text columns on MySQL to get
# created as mediumtext instead of just text.
def MediumText():
return Text().with_variant(dialects.mysql.MEDIUMTEXT(), 'mysql')
def Inet():
return String(length=43).with_variant(dialects.postgresql.INET(),
'postgresql')
def InetSmall():
return String(length=39).with_variant(dialects.postgresql.INET(),
'postgresql')
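# The helpers above rely on SQLAlchemy's with_variant(): the generic type is
# kept for most backends and swapped for the dialect-specific type only when
# compiled for that dialect. A minimal, hedged illustration (not part of the
# original migration; the helper name below is local to this sketch):
def _mediumtext_compilation_demo():
    from sqlalchemy.dialects import mysql, sqlite
    # Compiles to MEDIUMTEXT on the MySQL dialect and to plain TEXT elsewhere.
    return (str(MediumText().compile(dialect=mysql.dialect())),
            str(MediumText().compile(dialect=sqlite.dialect())))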
def _create_shadow_tables(migrate_engine):
meta = MetaData(migrate_engine)
meta.reflect(migrate_engine)
table_names = meta.tables.keys()
meta.bind = migrate_engine
for table_name in table_names:
table = Table(table_name, meta, autoload=True)
columns = []
for column in table.columns:
column_copy = None
# NOTE(boris-42): BigInteger is not supported by sqlite, so
# after copy it will have NullType, other
# types that are used in Nova are supported by
# sqlite.
if isinstance(column.type, NullType):
column_copy = Column(column.name, BigInteger(), default=0)
if table_name == 'instances' and column.name == 'locked_by':
enum = Enum('owner', 'admin',
name='shadow_instances0locked_by')
column_copy = Column(column.name, enum)
else:
column_copy = column.copy()
columns.append(column_copy)
shadow_table_name = 'shadow_' + table_name
shadow_table = Table(shadow_table_name, meta, *columns,
mysql_engine='InnoDB')
try:
shadow_table.create()
except Exception:
LOG.info(repr(shadow_table))
LOG.exception(_('Exception while creating table.'))
raise
def _populate_instance_types(instance_types_table):
default_inst_types = {
'm1.tiny': dict(mem=512, vcpus=1, root_gb=1, eph_gb=0, flavid=1),
'm1.small': dict(mem=2048, vcpus=1, root_gb=20, eph_gb=0, flavid=2),
'm1.medium': dict(mem=4096, vcpus=2, root_gb=40, eph_gb=0, flavid=3),
'm1.large': dict(mem=8192, vcpus=4, root_gb=80, eph_gb=0, flavid=4),
'm1.xlarge': dict(mem=16384, vcpus=8, root_gb=160, eph_gb=0, flavid=5)
}
try:
i = instance_types_table.insert()
for name, values in default_inst_types.iteritems():
i.execute({'name': name, 'memory_mb': values["mem"],
'vcpus': values["vcpus"], 'deleted': 0,
'root_gb': values["root_gb"],
'ephemeral_gb': values["eph_gb"],
'rxtx_factor': 1,
'swap': 0,
'flavorid': values["flavid"],
'disabled': False,
'is_public': True})
except Exception:
LOG.info(repr(instance_types_table))
LOG.exception(_('Exception while seeding instance_types table'))
raise
# NOTE(dprince): we add these here so our schema contains dump tables
# which were added in migration 209 (in Havana). We can drop these in
# Icehouse: https://bugs.launchpad.net/nova/+bug/1266538
def _create_dump_tables(migrate_engine):
meta = MetaData(migrate_engine)
meta.reflect(migrate_engine)
table_names = ['compute_node_stats', 'compute_nodes', 'instance_actions',
'instance_actions_events', 'instance_faults', 'migrations']
for table_name in table_names:
table = Table(table_name, meta, autoload=True)
dump_table_name = 'dump_' + table.name
columns = []
for column in table.columns:
# NOTE(dprince): The dump_ tables were originally created from an
# earlier schema version so we don't want to add the pci_stats
# column so that schema diffs are exactly the same.
if column.name == 'pci_stats':
continue
else:
columns.append(column.copy())
table_dump = Table(dump_table_name, meta, *columns,
mysql_engine='InnoDB')
table_dump.create()
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
agent_builds = Table('agent_builds', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('hypervisor', String(length=255)),
Column('os', String(length=255)),
Column('architecture', String(length=255)),
Column('version', String(length=255)),
Column('url', String(length=255)),
Column('md5hash', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
aggregate_hosts = Table('aggregate_hosts', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('host', String(length=255)),
Column('aggregate_id', Integer, ForeignKey('aggregates.id'),
nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
aggregate_metadata = Table('aggregate_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('aggregate_id', Integer, ForeignKey('aggregates.id'),
nullable=False),
Column('key', String(length=255), nullable=False),
Column('value', String(length=255), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
aggregates = Table('aggregates', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('name', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
block_device_mapping = Table('block_device_mapping', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('device_name', String(length=255), nullable=True),
Column('delete_on_termination', Boolean),
Column('snapshot_id', String(length=36), nullable=True),
Column('volume_id', String(length=36), nullable=True),
Column('volume_size', Integer),
Column('no_device', Boolean),
Column('connection_info', MediumText()),
Column('instance_uuid', String(length=36)),
Column('deleted', Integer),
Column('source_type', String(length=255), nullable=True),
Column('destination_type', String(length=255), nullable=True),
Column('guest_format', String(length=255), nullable=True),
Column('device_type', String(length=255), nullable=True),
Column('disk_bus', String(length=255), nullable=True),
Column('boot_index', Integer),
Column('image_id', String(length=36), nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
bw_usage_cache = Table('bw_usage_cache', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('start_period', DateTime, nullable=False),
Column('last_refreshed', DateTime),
Column('bw_in', BigInteger),
Column('bw_out', BigInteger),
Column('mac', String(length=255)),
Column('uuid', String(length=36)),
Column('last_ctr_in', BigInteger()),
Column('last_ctr_out', BigInteger()),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
cells = Table('cells', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('api_url', String(length=255)),
Column('weight_offset', Float),
Column('weight_scale', Float),
Column('name', String(length=255)),
Column('is_parent', Boolean),
Column('deleted', Integer),
Column('transport_url', String(length=255), nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
certificates = Table('certificates', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('file_name', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
compute_node_stats = Table('compute_node_stats', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('compute_node_id', Integer, nullable=False),
Column('key', String(length=255), nullable=False),
Column('value', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
compute_nodes = Table('compute_nodes', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('service_id', Integer, nullable=False),
Column('vcpus', Integer, nullable=False),
Column('memory_mb', Integer, nullable=False),
Column('local_gb', Integer, nullable=False),
Column('vcpus_used', Integer, nullable=False),
Column('memory_mb_used', Integer, nullable=False),
Column('local_gb_used', Integer, nullable=False),
Column('hypervisor_type', MediumText(), nullable=False),
Column('hypervisor_version', Integer, nullable=False),
Column('cpu_info', MediumText(), nullable=False),
Column('disk_available_least', Integer),
Column('free_ram_mb', Integer),
Column('free_disk_gb', Integer),
Column('current_workload', Integer),
Column('running_vms', Integer),
Column('hypervisor_hostname', String(length=255)),
Column('deleted', Integer),
Column('host_ip', InetSmall()),
Column('supported_instances', Text),
Column('pci_stats', Text, nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
console_pools = Table('console_pools', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', InetSmall()),
Column('username', String(length=255)),
Column('password', String(length=255)),
Column('console_type', String(length=255)),
Column('public_hostname', String(length=255)),
Column('host', String(length=255)),
Column('compute_host', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
consoles = Table('consoles', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_name', String(length=255)),
Column('password', String(length=255)),
Column('port', Integer),
Column('pool_id', Integer, ForeignKey('console_pools.id')),
Column('instance_uuid', String(length=36),
ForeignKey('instances.uuid',
name='consoles_instance_uuid_fkey')),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
dns_domains = Table('dns_domains', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('domain', String(length=255), primary_key=True, nullable=False),
Column('scope', String(length=255)),
Column('availability_zone', String(length=255)),
Column('project_id', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
fixed_ips = Table('fixed_ips', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', InetSmall()),
Column('network_id', Integer),
Column('allocated', Boolean),
Column('leased', Boolean),
Column('reserved', Boolean),
Column('virtual_interface_id', Integer),
Column('host', String(length=255)),
Column('instance_uuid', String(length=36)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
floating_ips = Table('floating_ips', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', InetSmall()),
Column('fixed_ip_id', Integer),
Column('project_id', String(length=255)),
Column('host', String(length=255)),
Column('auto_assigned', Boolean),
Column('pool', String(length=255)),
Column('interface', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_faults = Table('instance_faults', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_uuid', String(length=36)),
Column('code', Integer, nullable=False),
Column('message', String(length=255)),
Column('details', MediumText()),
Column('host', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_id_mappings = Table('instance_id_mappings', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_info_caches = Table('instance_info_caches', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('network_info', MediumText()),
Column('instance_uuid', String(length=36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
groups = Table('instance_groups', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('id', Integer, primary_key=True, nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('uuid', String(length=36), nullable=False),
Column('name', String(length=255)),
UniqueConstraint('uuid', 'deleted',
name='uniq_instance_groups0uuid0deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_metadata = Table('instance_group_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('id', Integer, primary_key=True, nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
Column('group_id', Integer, ForeignKey('instance_groups.id'),
nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_policy = Table('instance_group_policy', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('id', Integer, primary_key=True, nullable=False),
Column('policy', String(length=255)),
Column('group_id', Integer, ForeignKey('instance_groups.id'),
nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_member = Table('instance_group_member', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_id', String(length=255)),
Column('group_id', Integer, ForeignKey('instance_groups.id'),
nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
instance_metadata = Table('instance_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
Column('instance_uuid', String(length=36), nullable=True),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_system_metadata = Table('instance_system_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_uuid', String(length=36), nullable=False),
Column('key', String(length=255), nullable=False),
Column('value', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_type_extra_specs = Table('instance_type_extra_specs', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_type_id', Integer, ForeignKey('instance_types.id'),
nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_type_projects = Table('instance_type_projects', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_type_id', Integer, nullable=False),
Column('project_id', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_types = Table('instance_types', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('name', String(length=255)),
Column('id', Integer, primary_key=True, nullable=False),
Column('memory_mb', Integer, nullable=False),
Column('vcpus', Integer, nullable=False),
Column('swap', Integer, nullable=False),
Column('vcpu_weight', Integer),
Column('flavorid', String(length=255)),
Column('rxtx_factor', Float),
Column('root_gb', Integer),
Column('ephemeral_gb', Integer),
Column('disabled', Boolean),
Column('is_public', Boolean),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
inst_lock_enum = Enum('owner', 'admin', name='instances0locked_by')
instances = Table('instances', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('internal_id', Integer),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('image_ref', String(length=255)),
Column('kernel_id', String(length=255)),
Column('ramdisk_id', String(length=255)),
Column('launch_index', Integer),
Column('key_name', String(length=255)),
Column('key_data', MediumText()),
Column('power_state', Integer),
Column('vm_state', String(length=255)),
Column('memory_mb', Integer),
Column('vcpus', Integer),
Column('hostname', String(length=255)),
Column('host', String(length=255)),
Column('user_data', MediumText()),
Column('reservation_id', String(length=255)),
Column('scheduled_at', DateTime),
Column('launched_at', DateTime),
Column('terminated_at', DateTime),
Column('display_name', String(length=255)),
Column('display_description', String(length=255)),
Column('availability_zone', String(length=255)),
Column('locked', Boolean),
Column('os_type', String(length=255)),
Column('launched_on', MediumText()),
Column('instance_type_id', Integer),
Column('vm_mode', String(length=255)),
Column('uuid', String(length=36)),
Column('architecture', String(length=255)),
Column('root_device_name', String(length=255)),
Column('access_ip_v4', InetSmall()),
Column('access_ip_v6', InetSmall()),
Column('config_drive', String(length=255)),
Column('task_state', String(length=255)),
Column('default_ephemeral_device', String(length=255)),
Column('default_swap_device', String(length=255)),
Column('progress', Integer),
Column('auto_disk_config', Boolean),
Column('shutdown_terminate', Boolean),
Column('disable_terminate', Boolean),
Column('root_gb', Integer),
Column('ephemeral_gb', Integer),
Column('cell_name', String(length=255)),
Column('node', String(length=255)),
Column('deleted', Integer),
Column('locked_by', inst_lock_enum),
Column('cleaned', Integer, default=0),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_actions = Table('instance_actions', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('action', String(length=255)),
Column('instance_uuid', String(length=36)),
Column('request_id', String(length=255)),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('start_time', DateTime),
Column('finish_time', DateTime),
Column('message', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
instance_actions_events = Table('instance_actions_events', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('event', String(length=255)),
Column('action_id', Integer, ForeignKey('instance_actions.id')),
Column('start_time', DateTime),
Column('finish_time', DateTime),
Column('result', String(length=255)),
Column('traceback', Text),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
iscsi_targets = Table('iscsi_targets', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('target_num', Integer),
Column('host', String(length=255)),
Column('volume_id', String(length=36), nullable=True),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
key_pairs = Table('key_pairs', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('name', String(length=255)),
Column('user_id', String(length=255)),
Column('fingerprint', String(length=255)),
Column('public_key', MediumText()),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
migrations = Table('migrations', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('source_compute', String(length=255)),
Column('dest_compute', String(length=255)),
Column('dest_host', String(length=255)),
Column('status', String(length=255)),
Column('instance_uuid', String(length=36)),
Column('old_instance_type_id', Integer),
Column('new_instance_type_id', Integer),
Column('source_node', String(length=255)),
Column('dest_node', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
networks = Table('networks', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('injected', Boolean),
Column('cidr', Inet()),
Column('netmask', InetSmall()),
Column('bridge', String(length=255)),
Column('gateway', InetSmall()),
Column('broadcast', InetSmall()),
Column('dns1', InetSmall()),
Column('vlan', Integer),
Column('vpn_public_address', InetSmall()),
Column('vpn_public_port', Integer),
Column('vpn_private_address', InetSmall()),
Column('dhcp_start', InetSmall()),
Column('project_id', String(length=255)),
Column('host', String(length=255)),
Column('cidr_v6', Inet()),
Column('gateway_v6', InetSmall()),
Column('label', String(length=255)),
Column('netmask_v6', InetSmall()),
Column('bridge_interface', String(length=255)),
Column('multi_host', Boolean),
Column('dns2', InetSmall()),
Column('uuid', String(length=36)),
Column('priority', Integer),
Column('rxtx_base', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
pci_devices_uc_name = 'uniq_pci_devices0compute_node_id0address0deleted'
pci_devices = Table('pci_devices', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Integer, default=0, nullable=False),
Column('id', Integer, primary_key=True),
Column('compute_node_id', Integer, nullable=False),
Column('address', String(12), nullable=False),
Column('product_id', String(4)),
Column('vendor_id', String(4)),
Column('dev_type', String(8)),
Column('dev_id', String(255)),
Column('label', String(255), nullable=False),
Column('status', String(36), nullable=False),
Column('extra_info', Text, nullable=True),
Column('instance_uuid', String(36), nullable=True),
Index('ix_pci_devices_compute_node_id_deleted',
'compute_node_id', 'deleted'),
Index('ix_pci_devices_instance_uuid_deleted',
'instance_uuid', 'deleted'),
UniqueConstraint('compute_node_id',
'address', 'deleted',
name=pci_devices_uc_name),
mysql_engine='InnoDB',
mysql_charset='utf8')
provider_fw_rules = Table('provider_fw_rules', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('protocol', String(length=5)),
Column('from_port', Integer),
Column('to_port', Integer),
Column('cidr', Inet()),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
quota_classes = Table('quota_classes', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('class_name', String(length=255)),
Column('resource', String(length=255)),
Column('hard_limit', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
quota_usages = Table('quota_usages', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('project_id', String(length=255)),
Column('resource', String(length=255)),
Column('in_use', Integer, nullable=False),
Column('reserved', Integer, nullable=False),
Column('until_refresh', Integer),
Column('deleted', Integer),
Column('user_id', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
quotas = Table('quotas', meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('project_id', String(length=255)),
Column('resource', String(length=255), nullable=False),
Column('hard_limit', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
uniq_name = "uniq_project_user_quotas0user_id0project_id0resource0deleted"
project_user_quotas = Table('project_user_quotas', meta,
Column('id', Integer, primary_key=True,
nullable=False),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('user_id',
String(length=255),
nullable=False),
Column('project_id',
String(length=255),
nullable=False),
Column('resource',
String(length=255),
nullable=False),
Column('hard_limit', Integer, nullable=True),
UniqueConstraint('user_id', 'project_id', 'resource',
'deleted', name=uniq_name),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
reservations = Table('reservations', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
Column('usage_id', Integer, nullable=False),
Column('project_id', String(length=255)),
Column('resource', String(length=255)),
Column('delta', Integer, nullable=False),
Column('expire', DateTime),
Column('deleted', Integer),
Column('user_id', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
s3_images = Table('s3_images', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_group_instance_association = \
Table('security_group_instance_association', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('security_group_id', Integer),
Column('instance_uuid', String(length=36)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_group_rules = Table('security_group_rules', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('parent_group_id', Integer, ForeignKey('security_groups.id')),
Column('protocol', String(length=255)),
Column('from_port', Integer),
Column('to_port', Integer),
Column('cidr', Inet()),
Column('group_id', Integer, ForeignKey('security_groups.id')),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_groups = Table('security_groups', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('name', String(length=255)),
Column('description', String(length=255)),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_group_default_rules = Table('security_group_default_rules', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer, default=0),
Column('id', Integer, primary_key=True, nullable=False),
Column('protocol', String(length=5)),
Column('from_port', Integer),
Column('to_port', Integer),
Column('cidr', Inet()),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
services = Table('services', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('host', String(length=255)),
Column('binary', String(length=255)),
Column('topic', String(length=255)),
Column('report_count', Integer, nullable=False),
Column('disabled', Boolean),
Column('deleted', Integer),
Column('disabled_reason', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
snapshot_id_mappings = Table('snapshot_id_mappings', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
snapshots = Table('snapshots', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', String(length=36), primary_key=True, nullable=False),
Column('volume_id', String(length=36), nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('status', String(length=255)),
Column('progress', String(length=255)),
Column('volume_size', Integer),
Column('scheduled_at', DateTime),
Column('display_name', String(length=255)),
Column('display_description', String(length=255)),
Column('deleted', String(length=36)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
task_log = Table('task_log', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('task_name', String(length=255), nullable=False),
Column('state', String(length=255), nullable=False),
Column('host', String(length=255), nullable=False),
Column('period_beginning', DateTime, nullable=False),
Column('period_ending', DateTime, nullable=False),
Column('message', String(length=255), nullable=False),
Column('task_items', Integer),
Column('errors', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
virtual_interfaces = Table('virtual_interfaces', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', String(length=255)),
Column('network_id', Integer),
Column('uuid', String(length=36)),
Column('instance_uuid', String(length=36), nullable=True),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volume_id_mappings = Table('volume_id_mappings', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volumes = Table('volumes', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', String(length=36), primary_key=True, nullable=False),
Column('ec2_id', String(length=255)),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('host', String(length=255)),
Column('size', Integer),
Column('availability_zone', String(length=255)),
Column('mountpoint', String(length=255)),
Column('status', String(length=255)),
Column('attach_status', String(length=255)),
Column('scheduled_at', DateTime),
Column('launched_at', DateTime),
Column('terminated_at', DateTime),
Column('display_name', String(length=255)),
Column('display_description', String(length=255)),
Column('provider_location', String(length=256)),
Column('provider_auth', String(length=256)),
Column('snapshot_id', String(length=36)),
Column('volume_type_id', Integer),
Column('instance_uuid', String(length=36)),
Column('attach_time', DateTime),
Column('deleted', String(length=36)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volume_usage_cache = Table('volume_usage_cache', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('volume_id', String(36), nullable=False),
Column('tot_last_refreshed', DateTime(timezone=False)),
Column('tot_reads', BigInteger(), default=0),
Column('tot_read_bytes', BigInteger(), default=0),
Column('tot_writes', BigInteger(), default=0),
Column('tot_write_bytes', BigInteger(), default=0),
Column('curr_last_refreshed', DateTime(timezone=False)),
Column('curr_reads', BigInteger(), default=0),
Column('curr_read_bytes', BigInteger(), default=0),
Column('curr_writes', BigInteger(), default=0),
Column('curr_write_bytes', BigInteger(), default=0),
Column('deleted', Integer),
Column("instance_uuid", String(length=36)),
Column("project_id", String(length=36)),
Column("user_id", String(length=36)),
Column("availability_zone", String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instances.create()
Index('project_id', instances.c.project_id).create()
Index('uuid', instances.c.uuid, unique=True).create()
# create all tables
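# Parent tables that are referenced by foreign keys are listed first so
# that the dependent (child) tables can be created afterwards.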
tables = [aggregates, console_pools, instance_types,
security_groups, snapshots, volumes,
# those that are children and others later
agent_builds, aggregate_hosts, aggregate_metadata,
block_device_mapping, bw_usage_cache, cells,
certificates, compute_node_stats, compute_nodes, consoles,
dns_domains, fixed_ips, floating_ips,
instance_faults, instance_id_mappings, instance_info_caches,
instance_metadata, instance_system_metadata,
instance_type_extra_specs, instance_type_projects,
instance_actions, instance_actions_events,
groups, group_metadata, group_policy, group_member,
iscsi_targets, key_pairs, migrations, networks,
pci_devices, provider_fw_rules, quota_classes, quota_usages,
quotas, project_user_quotas,
reservations, s3_images, security_group_instance_association,
security_group_rules, security_group_default_rules,
services, snapshot_id_mappings, task_log,
virtual_interfaces,
volume_id_mappings,
volume_usage_cache]
for table in tables:
try:
table.create()
except Exception:
LOG.info(repr(table))
LOG.exception(_('Exception while creating table.'))
raise
# task log unique constraint
task_log_uc = "uniq_task_log0task_name0host0period_beginning0period_ending"
task_log_cols = ('task_name', 'host', 'period_beginning', 'period_ending')
uc = UniqueConstraint(*task_log_cols, table=task_log, name=task_log_uc)
uc.create()
# networks unique constraint
UniqueConstraint('vlan', 'deleted', table=networks,
name='uniq_networks0vlan0deleted').create()
# instance_type_name constraint
UniqueConstraint('name', 'deleted', table=instance_types,
name='uniq_instance_types0name0deleted').create()
# flavorid unique constraint
UniqueConstraint('flavorid', 'deleted', table=instance_types,
name='uniq_instance_types0flavorid0deleted').create()
# keypair contraint
UniqueConstraint('user_id', 'name', 'deleted', table=key_pairs,
name='uniq_key_pairs0user_id0name0deleted').create()
# instance_type_projects constraint
inst_type_uc_name = 'uniq_instance_type_projects0instance_type_id0' + \
'project_id0deleted'
UniqueConstraint('instance_type_id', 'project_id', 'deleted',
table=instance_type_projects,
name=inst_type_uc_name).create()
# floating_ips unique constraint
UniqueConstraint('address', 'deleted',
table=floating_ips,
name='uniq_floating_ips0address0deleted').create()
# instance_info_caches
UniqueConstraint('instance_uuid',
table=instance_info_caches,
name='uniq_instance_info_caches0instance_uuid').create()
UniqueConstraint('address', 'deleted',
table=virtual_interfaces,
name='uniq_virtual_interfaces0address0deleted').create()
# cells
UniqueConstraint('name', 'deleted',
table=cells,
name='uniq_cells0name0deleted').create()
# security_groups
uc = UniqueConstraint('project_id', 'name', 'deleted',
table=security_groups,
name='uniq_security_groups0project_id0name0deleted')
uc.create()
# quotas
UniqueConstraint('project_id', 'resource', 'deleted',
table=quotas,
name='uniq_quotas0project_id0resource0deleted').create()
# fixed_ips
UniqueConstraint('address', 'deleted',
table=fixed_ips,
name='uniq_fixed_ips0address0deleted').create()
# services
UniqueConstraint('host', 'topic', 'deleted',
table=services,
name='uniq_services0host0topic0deleted').create()
UniqueConstraint('host', 'binary', 'deleted',
table=services,
name='uniq_services0host0binary0deleted').create()
# agent_builds
uc_name = 'uniq_agent_builds0hypervisor0os0architecture0deleted'
UniqueConstraint('hypervisor', 'os', 'architecture', 'deleted',
table=agent_builds,
name=uc_name).create()
uc_name = 'uniq_console_pools0host0console_type0compute_host0deleted'
UniqueConstraint('host', 'console_type', 'compute_host', 'deleted',
table=console_pools,
name=uc_name).create()
uc_name = 'uniq_aggregate_hosts0host0aggregate_id0deleted'
UniqueConstraint('host', 'aggregate_id', 'deleted',
table=aggregate_hosts,
name=uc_name).create()
uc_name = 'uniq_aggregate_metadata0aggregate_id0key0deleted'
UniqueConstraint('aggregate_id', 'key', 'deleted',
table=aggregate_metadata,
name=uc_name).create()
uc_name = 'uniq_instance_type_extra_specs0instance_type_id0key0deleted'
UniqueConstraint('instance_type_id', 'key', 'deleted',
table=instance_type_extra_specs,
name=uc_name).create()
# created first (to preserve ordering for schema diffs)
mysql_pre_indexes = [
Index('instance_type_id', instance_type_projects.c.instance_type_id),
Index('project_id', dns_domains.c.project_id),
Index('fixed_ip_id', floating_ips.c.fixed_ip_id),
Index('network_id', virtual_interfaces.c.network_id),
Index('network_id', fixed_ips.c.network_id),
Index('fixed_ips_virtual_interface_id_fkey',
fixed_ips.c.virtual_interface_id),
Index('address', fixed_ips.c.address),
Index('fixed_ips_instance_uuid_fkey', fixed_ips.c.instance_uuid),
Index('instance_uuid', instance_system_metadata.c.instance_uuid),
Index('iscsi_targets_volume_id_fkey', iscsi_targets.c.volume_id),
Index('snapshot_id', block_device_mapping.c.snapshot_id),
Index('usage_id', reservations.c.usage_id),
Index('virtual_interfaces_instance_uuid_fkey',
virtual_interfaces.c.instance_uuid),
Index('volume_id', block_device_mapping.c.volume_id),
Index('security_group_id',
security_group_instance_association.c.security_group_id),
]
# Common indexes (indexes we apply to all databases)
# NOTE: order specific for MySQL diff support
common_indexes = [
# aggregate_metadata
Index('aggregate_metadata_key_idx', aggregate_metadata.c.key),
# agent_builds
Index('agent_builds_hypervisor_os_arch_idx',
agent_builds.c.hypervisor,
agent_builds.c.os,
agent_builds.c.architecture),
# block_device_mapping
Index('block_device_mapping_instance_uuid_idx',
block_device_mapping.c.instance_uuid),
Index('block_device_mapping_instance_uuid_device_name_idx',
block_device_mapping.c.instance_uuid,
block_device_mapping.c.device_name),
# NOTE(dprince): This is now a duplicate index on MySQL and needs to
# be removed there. We leave it here so the Index ordering
# matches on schema diffs (for MySQL).
# See Havana migration 186_new_bdm_format where we dropped the
# virtual_name column.
# IceHouse fix is here: https://bugs.launchpad.net/nova/+bug/1265839
Index(
'block_device_mapping_instance_uuid_virtual_name_device_name_idx',
block_device_mapping.c.instance_uuid,
block_device_mapping.c.device_name),
Index('block_device_mapping_instance_uuid_volume_id_idx',
block_device_mapping.c.instance_uuid,
block_device_mapping.c.volume_id),
# bw_usage_cache
Index('bw_usage_cache_uuid_start_period_idx',
bw_usage_cache.c.uuid, bw_usage_cache.c.start_period),
Index('certificates_project_id_deleted_idx',
certificates.c.project_id, certificates.c.deleted),
Index('certificates_user_id_deleted_idx', certificates.c.user_id,
certificates.c.deleted),
# compute_node_stats
Index('ix_compute_node_stats_compute_node_id',
compute_node_stats.c.compute_node_id),
Index('compute_node_stats_node_id_and_deleted_idx',
compute_node_stats.c.compute_node_id,
compute_node_stats.c.deleted),
# consoles
Index('consoles_instance_uuid_idx', consoles.c.instance_uuid),
# dns_domains
Index('dns_domains_domain_deleted_idx',
dns_domains.c.domain, dns_domains.c.deleted),
# fixed_ips
Index('fixed_ips_host_idx', fixed_ips.c.host),
Index('fixed_ips_network_id_host_deleted_idx', fixed_ips.c.network_id,
fixed_ips.c.host, fixed_ips.c.deleted),
Index('fixed_ips_address_reserved_network_id_deleted_idx',
fixed_ips.c.address, fixed_ips.c.reserved,
fixed_ips.c.network_id, fixed_ips.c.deleted),
Index('fixed_ips_deleted_allocated_idx', fixed_ips.c.address,
fixed_ips.c.deleted, fixed_ips.c.allocated),
# floating_ips
Index('floating_ips_host_idx', floating_ips.c.host),
Index('floating_ips_project_id_idx', floating_ips.c.project_id),
Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
floating_ips.c.pool, floating_ips.c.deleted,
floating_ips.c.fixed_ip_id, floating_ips.c.project_id),
# group_member
Index('instance_group_member_instance_idx',
group_member.c.instance_id),
# group_metadata
Index('instance_group_metadata_key_idx', group_metadata.c.key),
# group_policy
Index('instance_group_policy_policy_idx', group_policy.c.policy),
# instances
Index('instances_reservation_id_idx',
instances.c.reservation_id),
Index('instances_terminated_at_launched_at_idx',
instances.c.terminated_at,
instances.c.launched_at),
Index('instances_task_state_updated_at_idx',
instances.c.task_state,
instances.c.updated_at),
Index('instances_host_deleted_idx', instances.c.host,
instances.c.deleted),
Index('instances_uuid_deleted_idx', instances.c.uuid,
instances.c.deleted),
Index('instances_host_node_deleted_idx', instances.c.host,
instances.c.node, instances.c.deleted),
Index('instances_host_deleted_cleaned_idx',
instances.c.host, instances.c.deleted,
instances.c.cleaned),
# instance_actions
Index('instance_uuid_idx', instance_actions.c.instance_uuid),
Index('request_id_idx', instance_actions.c.request_id),
# instance_faults
Index('instance_faults_host_idx', instance_faults.c.host),
Index('instance_faults_instance_uuid_deleted_created_at_idx',
instance_faults.c.instance_uuid, instance_faults.c.deleted,
instance_faults.c.created_at),
# instance_id_mappings
Index('ix_instance_id_mappings_uuid', instance_id_mappings.c.uuid),
# instance_metadata
Index('instance_metadata_instance_uuid_idx',
instance_metadata.c.instance_uuid),
# instance_type_extra_specs
Index('instance_type_extra_specs_instance_type_id_key_idx',
instance_type_extra_specs.c.instance_type_id,
instance_type_extra_specs.c.key),
# iscsi_targets
Index('iscsi_targets_host_idx', iscsi_targets.c.host),
Index('iscsi_targets_host_volume_id_deleted_idx',
iscsi_targets.c.host, iscsi_targets.c.volume_id,
iscsi_targets.c.deleted),
# migrations
Index('migrations_by_host_nodes_and_status_idx',
migrations.c.deleted, migrations.c.source_compute,
migrations.c.dest_compute, migrations.c.source_node,
migrations.c.dest_node, migrations.c.status),
Index('migrations_instance_uuid_and_status_idx',
migrations.c.deleted, migrations.c.instance_uuid,
migrations.c.status),
# networks
Index('networks_host_idx', networks.c.host),
Index('networks_cidr_v6_idx', networks.c.cidr_v6),
Index('networks_bridge_deleted_idx', networks.c.bridge,
networks.c.deleted),
Index('networks_project_id_deleted_idx', networks.c.project_id,
networks.c.deleted),
Index('networks_uuid_project_id_deleted_idx',
networks.c.uuid, networks.c.project_id, networks.c.deleted),
Index('networks_vlan_deleted_idx', networks.c.vlan,
networks.c.deleted),
# project_user_quotas
Index('project_user_quotas_project_id_deleted_idx',
project_user_quotas.c.project_id,
project_user_quotas.c.deleted),
Index('project_user_quotas_user_id_deleted_idx',
project_user_quotas.c.user_id, project_user_quotas.c.deleted),
# reservations
Index('ix_reservations_project_id', reservations.c.project_id),
Index('ix_reservations_user_id_deleted',
reservations.c.user_id, reservations.c.deleted),
Index('reservations_uuid_idx', reservations.c.uuid),
# security_group_instance_association
Index('security_group_instance_association_instance_uuid_idx',
security_group_instance_association.c.instance_uuid),
# task_log
Index('ix_task_log_period_beginning', task_log.c.period_beginning),
Index('ix_task_log_host', task_log.c.host),
Index('ix_task_log_period_ending', task_log.c.period_ending),
# quota_classes
Index('ix_quota_classes_class_name', quota_classes.c.class_name),
# quota_usages
Index('ix_quota_usages_project_id', quota_usages.c.project_id),
Index('ix_quota_usages_user_id_deleted',
quota_usages.c.user_id, quota_usages.c.deleted),
# volumes
Index('volumes_instance_uuid_idx', volumes.c.instance_uuid),
]
# MySQL specific indexes
if migrate_engine.name == 'mysql':
for index in mysql_pre_indexes:
index.create(migrate_engine)
# mysql-specific index by leftmost 100 chars. (mysql gets angry if the
# index key length is too long.)
sql = ("create index migrations_by_host_nodes_and_status_idx ON "
"migrations (deleted, source_compute(100), dest_compute(100), "
"source_node(100), dest_node(100), status)")
migrate_engine.execute(sql)
# PostgreSQL specific indexes
if migrate_engine.name == 'postgresql':
Index('address', fixed_ips.c.address).create()
# NOTE(dprince): PostgreSQL doesn't allow duplicate indexes
# so we skip creation of select indexes (so schemas match exactly).
POSTGRES_INDEX_SKIPS = [
# See Havana migration 186_new_bdm_format where we dropped the
# virtual_name column.
# IceHouse fix is here: https://bugs.launchpad.net/nova/+bug/1265839
'block_device_mapping_instance_uuid_virtual_name_device_name_idx'
]
MYSQL_INDEX_SKIPS = [
# we create this one manually for MySQL above
'migrations_by_host_nodes_and_status_idx'
]
for index in common_indexes:
if migrate_engine.name == 'postgresql' and \
index.name in POSTGRES_INDEX_SKIPS:
continue
if migrate_engine.name == 'mysql' and \
index.name in MYSQL_INDEX_SKIPS:
continue
else:
index.create(migrate_engine)
Index('project_id', dns_domains.c.project_id).drop
fkeys = [
[[fixed_ips.c.instance_uuid],
[instances.c.uuid],
'fixed_ips_instance_uuid_fkey'],
[[block_device_mapping.c.instance_uuid],
[instances.c.uuid],
'block_device_mapping_instance_uuid_fkey'],
[[instance_info_caches.c.instance_uuid],
[instances.c.uuid],
'instance_info_caches_instance_uuid_fkey'],
[[instance_metadata.c.instance_uuid],
[instances.c.uuid],
'instance_metadata_instance_uuid_fkey'],
[[instance_system_metadata.c.instance_uuid],
[instances.c.uuid],
'instance_system_metadata_ibfk_1'],
[[instance_type_projects.c.instance_type_id],
[instance_types.c.id],
'instance_type_projects_ibfk_1'],
[[iscsi_targets.c.volume_id],
[volumes.c.id],
'iscsi_targets_volume_id_fkey'],
[[reservations.c.usage_id],
[quota_usages.c.id],
'reservations_ibfk_1'],
[[security_group_instance_association.c.instance_uuid],
[instances.c.uuid],
'security_group_instance_association_instance_uuid_fkey'],
[[security_group_instance_association.c.security_group_id],
[security_groups.c.id],
'security_group_instance_association_ibfk_1'],
[[virtual_interfaces.c.instance_uuid],
[instances.c.uuid],
'virtual_interfaces_instance_uuid_fkey'],
[[compute_node_stats.c.compute_node_id],
[compute_nodes.c.id],
'fk_compute_node_stats_compute_node_id'],
[[compute_nodes.c.service_id],
[services.c.id],
'fk_compute_nodes_service_id'],
[[instance_actions.c.instance_uuid],
[instances.c.uuid],
'fk_instance_actions_instance_uuid'],
[[instance_faults.c.instance_uuid],
[instances.c.uuid],
'fk_instance_faults_instance_uuid'],
[[migrations.c.instance_uuid],
[instances.c.uuid],
'fk_migrations_instance_uuid'],
]
for fkey_pair in fkeys:
if migrate_engine.name == 'mysql':
# For MySQL we name our fkeys explicitly so they match Havana
fkey = ForeignKeyConstraint(columns=fkey_pair[0],
refcolumns=fkey_pair[1],
name=fkey_pair[2])
fkey.create()
elif migrate_engine.name == 'postgresql':
# PostgreSQL names things like it wants (correct and compatible!)
fkey = ForeignKeyConstraint(columns=fkey_pair[0],
refcolumns=fkey_pair[1])
fkey.create()
if migrate_engine.name == "mysql":
# In Folsom we explicitly converted migrate_version to UTF8.
sql = "ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8;"
# Set default DB charset to UTF8.
sql += "ALTER DATABASE %s DEFAULT CHARACTER SET utf8;" % \
migrate_engine.url.database
migrate_engine.execute(sql)
_create_shadow_tables(migrate_engine)
# populate initial instance types
_populate_instance_types(instance_types)
_create_dump_tables(migrate_engine)
def downgrade(migrate_engine):
raise NotImplementedError('Downgrade from Havana is unsupported.')
|
|
#!/usr/bin/env python2.7
from db.db_Dao import DRGlanceDao, DRNovaDao
from db.models import Base, DRGlance, DRNova
import logging
import pdb
import time
import keystoneclient.v2_0.client as keystoneclient
from glanceclient import Client
import glanceclient
import ConfigParser
import string,os,sys
import re
def post_handle(message):
cf=ConfigParser.ConfigParser()
logger = logging.getLogger("GlanceHandler")
glanceDao = DRGlanceDao()
cf.read("/home/eshufan/projects/drcontroller/drcontroller/conf/set.conf")
drf_keystone = keystoneclient.Client(auth_url=cf.get("drf","auth_url"),
username= cf.get("drf","user"),
password= cf.get("drf","password"),
tenant_name=cf.get("drf","tenant_name"))
drf_glance_endpoint = drf_keystone.service_catalog.url_for(service_type='image',
endpoint_type='publicURL')
drf_glance = glanceclient.Client('1',drf_glance_endpoint, token=drf_keystone.auth_token)
# print "drf:", drf_glance_endpoint
# pdb.set_trace()
image_id=message['Response']['image']['id']
status = message['Response']['image']['status']
logger.info('Create shadow image for '+ image_id + ' in dr site')
# status=drf_glance.images.get(image_id).status
# count=0
# while (status != 'active') and (status != 'killed'):
# time.sleep(1)
# if status == 'queued':
# count +=1
# else:
# count = 0
# if count == 5:
# glanceDao.add(DRGlance(primary_uuid=image_id,status='queued'))
# break
# status=drf_glance.images.get(image_id).status
if status == 'queued':
glanceDao.add(DRGlance(primary_uuid=image_id,status='queued'))
if status == 'active':
new_data=drf_glance.images.data(image_id)._resp
drc_keystone = keystoneclient.Client(auth_url=cf.get("drc","auth_url"),
username= cf.get("drc","user"),
password= cf.get("drc","password"),
tenant_name=cf.get("drc","tenant_name"))
drc_glance_endpoint = drc_keystone.service_catalog.url_for(service_type='image',
endpoint_type='publicURL')
drc_glance = glanceclient.Client('1',drc_glance_endpoint, token=drc_keystone.auth_token)
image=drc_glance.images.create(name = message['Response']['image']['name']+"_shadow",
container_format = message['Response']['image']['container_format'],
min_ram = message['Response']['image']['min_ram'],
disk_format = message['Response']['image']['disk_format'],
min_disk = message['Response']['image']['min_disk'],
protected = str(message['Response']['image']['protected']),
is_public = str( message['Response']['image']['is_public']),
owner = message['Response']['image']['owner'],
data = new_data )
glanceDao.add(DRGlance(primary_uuid=image_id,secondary_uuid=image.id,status='active'))
logger.info('Shadow image ' + image.id + ' created for ' + image_id)
def delete_handle(message):
glanceDao = DRGlanceDao()
logger = logging.getLogger("GlanceHandler")
url=message['Request']['url'].split('/')
image_id=url[len(url)-1]
cf=ConfigParser.ConfigParser()
cf.read("/home/eshufan/projects/drcontroller/drcontroller/conf/set.conf")
try:
drc_id = glanceDao.get_by_primary_uuid(image_id).secondary_uuid
except:
return
# drf_keystone = keystoneclient.Client(auth_url=cf.get("drf","auth_url"),
# username= cf.get("drf","user"),
# password= cf.get("drf","password"),
# tenant_name=cf.get("drf","tenant_name"))
# drf_glance_endpoint = drf_keystone.service_catalog.url_for(service_type='image',
# endpoint_type='publicURL')
# drf_glance = glanceclient.Client('1',drf_glance_endpoint, token=drf_keystone.auth_token)
# status=drf_glance.images.get(image_id).status
# count=0
# while (status != 'deleted'):
# time.sleep(1)
# count +=1
# if count == 5:
# break
# status=drf_glance.images.get(image_id).status
# if status == 'deleted':
logger.info('Delete shadow image for '+ image_id + ' in dr site')
drc_keystone = keystoneclient.Client(auth_url=cf.get("drc","auth_url"),
username= cf.get("drc","user"),
password= cf.get("drc","password"),
tenant_name=cf.get("drc","tenant_name"))
drc_glance_endpoint = drc_keystone.service_catalog.url_for(service_type='image',
endpoint_type='publicURL')
drc_glance = glanceclient.Client('2',drc_glance_endpoint, token=drc_keystone.auth_token)
if drc_id is not None:
drc_glance.images.delete(drc_id)
glanceDao.delete_by_primary_uuid(image_id)
logger.info('Shadow image ' + drc_id + ' deleted for ' + image_id)
def put_handle(message):
glanceDao = DRGlanceDao()
cf=ConfigParser.ConfigParser()
cf.read("/home/eshufan/projects/drcontroller/drcontroller/conf/set.conf")
image_id=message['Response']['image']['id']
try:
glancedb= glanceDao.get_by_primary_uuid(image_id)
gl_status=glancedb.status
except:
return
if gl_status=='queued':
drf_keystone = keystoneclient.Client(auth_url=cf.get("drf","auth_url"),
username= cf.get("drf","user"),
password= cf.get("drf","password"),
tenant_name=cf.get("drf","tenant_name"))
drf_glance_endpoint = drf_keystone.service_catalog.url_for(service_type='image',
endpoint_type='publicURL')
drf_glance = glanceclient.Client('1',drf_glance_endpoint, token=drf_keystone.auth_token)
# status=drf_glance.images.get(image_id).status
# count=0
# while (status != 'active') and (status != 'killed'):
# time.sleep(1)
# if status == 'queued':
# count +=1
# else:
# count = 0
# if count == 5:
# break
# status=drf_glance.images.get(image_id).status
status=message['Response']['image']['status']
if status == 'active':
new_data=drf_glance.images.data(image_id)._resp
drc_keystone = keystoneclient.Client(auth_url=cf.get("drc","auth_url"),
username= cf.get("drc","user"),
password= cf.get("drc","password"),
tenant_name=cf.get("drc","tenant_name"))
drc_glance_endpoint = drc_keystone.service_catalog.url_for(service_type='image',
endpoint_type='publicURL')
drc_glance = glanceclient.Client('1',drc_glance_endpoint, token=drc_keystone.auth_token)
image=drc_glance.images.create(name = message['Response']['image']['name']+"_shadow",
container_format = message['Response']['image']['container_format'],
min_ram = message['Response']['image']['min_ram'],
disk_format = message['Response']['image']['disk_format'],
min_disk = message['Response']['image']['min_disk'],
protected = str(message['Response']['image']['protected']),
is_public = str( message['Response']['image']['is_public']),
owner = message['Response']['image']['owner'],
data = new_data )
glanceDao.delete_by_primary_uuid(image_id)
glanceDao.add(DRGlance(primary_uuid=image_id,secondary_uuid=image.id,status='active'))
if gl_status=='active':
try:
drc_id = glanceDao.get_by_primary_uuid(image_id).secondary_uuid
except:
return
drc_keystone = keystoneclient.Client(auth_url=cf.get("drc","auth_url"),
username= cf.get("drc","user"),
password= cf.get("drc","password"),
tenant_name=cf.get("drc","tenant_name"))
drc_glance_endpoint = drc_keystone.service_catalog.url_for(service_type='image',
endpoint_type='publicURL')
drc_glance = glanceclient.Client('1',drc_glance_endpoint, token=drc_keystone.auth_token)
image=drc_glance.images.update(image=drc_id,
name = message['Response']['image']['name']+"_shadow",
container_format = message['Response']['image']['container_format'],
min_ram = message['Response']['image']['min_ram'],
disk_format = message['Response']['image']['disk_format'],
min_disk = message['Response']['image']['min_disk'],
protected = str(message['Response']['image']['protected']),
is_public = str( message['Response']['image']['is_public']),
owner = message['Response']['image']['owner']
)
def test():
changelog = logging.getLogger("GlanceHandler")
changelog.info("testhandle")
novaDao = DRNovaDao(DRNova)
changelog.info(novaDao.get_by_primary_uuid("test").secondary_uuid)
class GlanceHandler(object):
def __init__(self):
self.logger = logging.getLogger("GlanceHandler")
self.logger.info('Init GlanceHandler')
def accept(self, *req, **kwargs):
self.logger.info("Glance request accept")
if len(req)>0:
for i in range(0,len(req)):
if req[i] != {}:
env=req[i].body
if len(env)>0:
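# The body is a JSON document; the replace() calls map the JSON literals
# null/false/true onto their Python equivalents so eval() can turn the
# text into a dict. json.loads(env) would be a safer way to parse it.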
message = eval(env.replace('null','None').replace('false','False').replace('true','True'))
if message['Request']['type']=='POST':
pattern = re.compile(r'http://.*/v./images$')
match = pattern.match(message['Request']['url'])
if match:
post_handle(message)
elif message['Request']['type']=='DELETE':
pattern = re.compile(r'http://.*/v./images/.{36}$')
match = pattern.match(message['Request']['url'])
if match:
delete_handle(message)
else:
pattern = re.compile(r'http://.*/v./images/.{36}$')
match = pattern.match(message['Request']['url'])
if match:
put_handle(message)
return ['Hello Glance']
|
|
"""Simple code for training an RNN for motion prediction."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import time
import h5py
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import data_utils
import seq2seq_model
# Learning
tf.app.flags.DEFINE_float("learning_rate", .005, "Learning rate.")
tf.app.flags.DEFINE_float("learning_rate_decay_factor", 0.95, "Learning rate is multiplied by this much. 1 means no decay.")
tf.app.flags.DEFINE_integer("learning_rate_step", 10000, "Every this many steps, do decay.")
tf.app.flags.DEFINE_float("max_gradient_norm", 5, "Clip gradients to this norm.")
tf.app.flags.DEFINE_integer("batch_size", 16, "Batch size to use during training.")
tf.app.flags.DEFINE_integer("iterations", int(1e5), "Iterations to train for.")
# Architecture
tf.app.flags.DEFINE_string("architecture", "tied", "Seq2seq architecture to use: [basic, tied].")
tf.app.flags.DEFINE_integer("size", 1024, "Size of each model layer.")
tf.app.flags.DEFINE_integer("num_layers", 1, "Number of layers in the model.")
tf.app.flags.DEFINE_integer("seq_length_in", 50, "Number of frames to feed into the encoder. 25 fps")
tf.app.flags.DEFINE_integer("seq_length_out", 10, "Number of frames that the decoder has to predict. 25fps")
tf.app.flags.DEFINE_boolean("omit_one_hot", False, "Whether to remove one-hot encoding from the data")
tf.app.flags.DEFINE_boolean("residual_velocities", False, "Add a residual connection that effectively models velocities")
# Directories
tf.app.flags.DEFINE_string("data_dir", os.path.normpath("./data/h3.6m/dataset"), "Data directory")
tf.app.flags.DEFINE_string("train_dir", os.path.normpath("./experiments/"), "Training directory.")
tf.app.flags.DEFINE_string("action","all", "The action to train on. all means all the actions, all_periodic means walking, eating and smoking")
tf.app.flags.DEFINE_string("loss_to_use","sampling_based", "The type of loss to use, supervised or sampling_based")
tf.app.flags.DEFINE_integer("test_every", 1000, "How often to compute error on the test set.")
tf.app.flags.DEFINE_integer("save_every", 1000, "How often to compute error on the test set.")
tf.app.flags.DEFINE_boolean("sample", False, "Set to True for sampling.")
tf.app.flags.DEFINE_boolean("use_cpu", False, "Whether to use the CPU")
tf.app.flags.DEFINE_integer("load", 0, "Try to load a previous checkpoint.")
FLAGS = tf.app.flags.FLAGS
train_dir = os.path.normpath(os.path.join( FLAGS.train_dir, FLAGS.action,
'out_{0}'.format(FLAGS.seq_length_out),
'iterations_{0}'.format(FLAGS.iterations),
FLAGS.architecture,
FLAGS.loss_to_use,
'omit_one_hot' if FLAGS.omit_one_hot else 'one_hot',
'depth_{0}'.format(FLAGS.num_layers),
'size_{0}'.format(FLAGS.size),
'lr_{0}'.format(FLAGS.learning_rate),
'residual_vel' if FLAGS.residual_velocities else 'not_residual_vel'))
summaries_dir = os.path.normpath(os.path.join( train_dir, "log" )) # Directory for TB summaries
def create_model(session, actions, sampling=False):
"""Create translation model and initialize or load parameters in session."""
model = seq2seq_model.Seq2SeqModel(
FLAGS.architecture,
FLAGS.seq_length_in if not sampling else 50,
FLAGS.seq_length_out if not sampling else 100,
FLAGS.size, # hidden layer size
FLAGS.num_layers,
FLAGS.max_gradient_norm,
FLAGS.batch_size,
FLAGS.learning_rate,
FLAGS.learning_rate_decay_factor,
summaries_dir,
FLAGS.loss_to_use if not sampling else "sampling_based",
len( actions ),
not FLAGS.omit_one_hot,
FLAGS.residual_velocities,
dtype=tf.float32)
if FLAGS.load <= 0:
print("Creating model with fresh parameters.")
session.run(tf.global_variables_initializer())
return model
ckpt = tf.train.get_checkpoint_state( train_dir, latest_filename="checkpoint")
print( "train_dir", train_dir )
if ckpt and ckpt.model_checkpoint_path:
# Check if the specific checkpoint exists
if FLAGS.load > 0:
if os.path.isfile(os.path.join(train_dir,"checkpoint-{0}.index".format(FLAGS.load))):
ckpt_name = os.path.normpath(os.path.join( os.path.join(train_dir,"checkpoint-{0}".format(FLAGS.load)) ))
else:
raise ValueError("Asked to load checkpoint {0}, but it does not seem to exist".format(FLAGS.load))
else:
ckpt_name = os.path.basename( ckpt.model_checkpoint_path )
print("Loading model {0}".format( ckpt_name ))
model.saver.restore( session, ckpt.model_checkpoint_path )
return model
else:
print("Could not find checkpoint. Aborting.")
raise ValueError("Checkpoint {0} does not seem to exist".format( ckpt.model_checkpoint_path ))
return model
def train():
"""Train a seq2seq model on human motion"""
actions = define_actions( FLAGS.action )
number_of_actions = len( actions )
train_set, test_set, data_mean, data_std, dim_to_ignore, dim_to_use = read_all_data(
actions, FLAGS.seq_length_in, FLAGS.seq_length_out, FLAGS.data_dir, not FLAGS.omit_one_hot )
# Limit TF to a fraction of the GPU memory (1.0 lets it claim the whole GPU; lower it to share the device)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1)
device_count = {"GPU": 0} if FLAGS.use_cpu else {"GPU": 1}
with tf.Session(config=tf.ConfigProto( gpu_options=gpu_options, device_count = device_count )) as sess:
# === Create the model ===
print("Creating %d layers of %d units." % (FLAGS.num_layers, FLAGS.size))
model = create_model( sess, actions )
model.train_writer.add_graph( sess.graph )
print( "Model created" )
# === Read and denormalize the gt with srnn's seeds, as we'll need them
# many times for evaluation in Euler Angles ===
srnn_gts_euler = get_srnn_gts( actions, model, test_set, data_mean,
data_std, dim_to_ignore, not FLAGS.omit_one_hot )
#=== This is the training loop ===
step_time, loss, val_loss = 0.0, 0.0, 0.0
current_step = 0 if FLAGS.load <= 0 else FLAGS.load + 1
previous_losses = []
step_time, loss = 0, 0
for _ in xrange( FLAGS.iterations ):
start_time = time.time()
# === Training step ===
encoder_inputs, decoder_inputs, decoder_outputs = model.get_batch( train_set, not FLAGS.omit_one_hot )
_, step_loss, loss_summary, lr_summary = model.step( sess, encoder_inputs, decoder_inputs, decoder_outputs, False )
model.train_writer.add_summary( loss_summary, current_step )
model.train_writer.add_summary( lr_summary, current_step )
if current_step % 10 == 0:
print("step {0:04d}; step_loss: {1:.4f}".format(current_step, step_loss ))
step_time += (time.time() - start_time) / FLAGS.test_every
loss += step_loss / FLAGS.test_every
current_step += 1
# === step decay ===
if current_step % FLAGS.learning_rate_step == 0:
sess.run(model.learning_rate_decay_op)
# Once in a while, we save checkpoint, print statistics, and run evals.
if current_step % FLAGS.test_every == 0:
# === Validation with randomly chosen seeds ===
forward_only = True
encoder_inputs, decoder_inputs, decoder_outputs = model.get_batch( test_set, not FLAGS.omit_one_hot )
step_loss, loss_summary = model.step(sess,
encoder_inputs, decoder_inputs, decoder_outputs, forward_only)
val_loss = step_loss # Loss book-keeping
model.test_writer.add_summary(loss_summary, current_step)
print()
print("{0: <16} |".format("milliseconds"), end="")
for ms in [80, 160, 320, 400, 560, 1000]:
print(" {0:5d} |".format(ms), end="")
print()
# === Validation with srnn's seeds ===
for action in actions:
# Evaluate the model on the test batches
encoder_inputs, decoder_inputs, decoder_outputs = model.get_batch_srnn( test_set, action )
srnn_loss, srnn_poses, _ = model.step(sess, encoder_inputs, decoder_inputs,
decoder_outputs, True, True)
# Denormalize the output
srnn_pred_expmap = data_utils.revert_output_format( srnn_poses,
data_mean, data_std, dim_to_ignore, actions, not FLAGS.omit_one_hot )
# Save the errors here
mean_errors = np.zeros( (len(srnn_pred_expmap), srnn_pred_expmap[0].shape[0]) )
# Training is done in exponential map, but the error is reported in
# Euler angles, as in previous work.
# See https://github.com/asheshjain399/RNNexp/issues/6#issuecomment-247769197
N_SEQUENCE_TEST = 8
for i in np.arange(N_SEQUENCE_TEST):
eulerchannels_pred = srnn_pred_expmap[i]
# Convert from exponential map to Euler angles
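# Channels 0-2 hold the global translation and are left as-is; each
# following group of 3 channels is one exponential-map rotation that is
# converted joint by joint.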
for j in np.arange( eulerchannels_pred.shape[0] ):
for k in np.arange(3,97,3):
eulerchannels_pred[j,k:k+3] = data_utils.rotmat2euler(
data_utils.expmap2rotmat( eulerchannels_pred[j,k:k+3] ))
# The global translation (first 3 entries) and global rotation
# (next 3 entries) are also not considered in the error, so they
# are set to zero.
# See https://github.com/asheshjain399/RNNexp/issues/6#issuecomment-249404882
gt_i=np.copy(srnn_gts_euler[action][i])
gt_i[:,0:6] = 0
# Now compute the l2 error. The following is numpy port of the error
# function provided by Ashesh Jain (in matlab), available at
# https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/dataParser/Utils/motionGenerationError.m#L40-L54
idx_to_use = np.where( np.std( gt_i, 0 ) > 1e-4 )[0]
euc_error = np.power( gt_i[:,idx_to_use] - eulerchannels_pred[:,idx_to_use], 2)
euc_error = np.sum(euc_error, 1)
euc_error = np.sqrt( euc_error )
mean_errors[i,:] = euc_error
# This is simply the mean error over the N_SEQUENCE_TEST examples
mean_mean_errors = np.mean( mean_errors, 0 )
# Pretty print of the results for 80, 160, 320, 400, 560 and 1000 ms
print("{0: <16} |".format(action), end="")
for ms in [1,3,7,9,13,24]:
if FLAGS.seq_length_out >= ms+1:
print(" {0:.3f} |".format( mean_mean_errors[ms] ), end="")
else:
print(" n/a |", end="")
print()
# Ugly massive if-then to log the error to tensorboard :shrug:
if action == "walking":
summaries = sess.run(
[model.walking_err80_summary,
model.walking_err160_summary,
model.walking_err320_summary,
model.walking_err400_summary,
model.walking_err560_summary,
model.walking_err1000_summary],
{model.walking_err80: mean_mean_errors[1] if FLAGS.seq_length_out >= 2 else None,
model.walking_err160: mean_mean_errors[3] if FLAGS.seq_length_out >= 4 else None,
model.walking_err320: mean_mean_errors[7] if FLAGS.seq_length_out >= 8 else None,
model.walking_err400: mean_mean_errors[9] if FLAGS.seq_length_out >= 10 else None,
model.walking_err560: mean_mean_errors[13] if FLAGS.seq_length_out >= 14 else None,
model.walking_err1000: mean_mean_errors[24] if FLAGS.seq_length_out >= 25 else None})
elif action == "eating":
summaries = sess.run(
[model.eating_err80_summary,
model.eating_err160_summary,
model.eating_err320_summary,
model.eating_err400_summary,
model.eating_err560_summary,
model.eating_err1000_summary],
{model.eating_err80: mean_mean_errors[1] if FLAGS.seq_length_out >= 2 else None,
model.eating_err160: mean_mean_errors[3] if FLAGS.seq_length_out >= 4 else None,
model.eating_err320: mean_mean_errors[7] if FLAGS.seq_length_out >= 8 else None,
model.eating_err400: mean_mean_errors[9] if FLAGS.seq_length_out >= 10 else None,
model.eating_err560: mean_mean_errors[13] if FLAGS.seq_length_out >= 14 else None,
model.eating_err1000: mean_mean_errors[24] if FLAGS.seq_length_out >= 25 else None})
elif action == "smoking":
summaries = sess.run(
[model.smoking_err80_summary,
model.smoking_err160_summary,
model.smoking_err320_summary,
model.smoking_err400_summary,
model.smoking_err560_summary,
model.smoking_err1000_summary],
{model.smoking_err80: mean_mean_errors[1] if FLAGS.seq_length_out >= 2 else None,
model.smoking_err160: mean_mean_errors[3] if FLAGS.seq_length_out >= 4 else None,
model.smoking_err320: mean_mean_errors[7] if FLAGS.seq_length_out >= 8 else None,
model.smoking_err400: mean_mean_errors[9] if FLAGS.seq_length_out >= 10 else None,
model.smoking_err560: mean_mean_errors[13] if FLAGS.seq_length_out >= 14 else None,
model.smoking_err1000: mean_mean_errors[24] if FLAGS.seq_length_out >= 25 else None})
elif action == "discussion":
summaries = sess.run(
[model.discussion_err80_summary,
model.discussion_err160_summary,
model.discussion_err320_summary,
model.discussion_err400_summary,
model.discussion_err560_summary,
model.discussion_err1000_summary],
{model.discussion_err80: mean_mean_errors[1] if FLAGS.seq_length_out >= 2 else None,
model.discussion_err160: mean_mean_errors[3] if FLAGS.seq_length_out >= 4 else None,
model.discussion_err320: mean_mean_errors[7] if FLAGS.seq_length_out >= 8 else None,
model.discussion_err400: mean_mean_errors[9] if FLAGS.seq_length_out >= 10 else None,
model.discussion_err560: mean_mean_errors[13] if FLAGS.seq_length_out >= 14 else None,
model.discussion_err1000: mean_mean_errors[24] if FLAGS.seq_length_out >= 25 else None})
elif action == "directions":
summaries = sess.run(
[model.directions_err80_summary,
model.directions_err160_summary,
model.directions_err320_summary,
model.directions_err400_summary,
model.directions_err560_summary,
model.directions_err1000_summary],
{model.directions_err80: mean_mean_errors[1] if FLAGS.seq_length_out >= 2 else None,
model.directions_err160: mean_mean_errors[3] if FLAGS.seq_length_out >= 4 else None,
model.directions_err320: mean_mean_errors[7] if FLAGS.seq_length_out >= 8 else None,
model.directions_err400: mean_mean_errors[9] if FLAGS.seq_length_out >= 10 else None,
model.directions_err560: mean_mean_errors[13] if FLAGS.seq_length_out >= 14 else None,
model.directions_err1000: mean_mean_errors[24] if FLAGS.seq_length_out >= 25 else None})
elif action == "greeting":
summaries = sess.run(
[model.greeting_err80_summary,
model.greeting_err160_summary,
model.greeting_err320_summary,
model.greeting_err400_summary,
model.greeting_err560_summary,
model.greeting_err1000_summary],
{model.greeting_err80: mean_mean_errors[1] if FLAGS.seq_length_out >= 2 else None,
model.greeting_err160: mean_mean_errors[3] if FLAGS.seq_length_out >= 4 else None,
model.greeting_err320: mean_mean_errors[7] if FLAGS.seq_length_out >= 8 else None,
model.greeting_err400: mean_mean_errors[9] if FLAGS.seq_length_out >= 10 else None,
model.greeting_err560: mean_mean_errors[13] if FLAGS.seq_length_out >= 14 else None,
model.greeting_err1000: mean_mean_errors[24] if FLAGS.seq_length_out >= 25 else None})
elif action == "phoning":
summaries = sess.run(
[model.phoning_err80_summary,
model.phoning_err160_summary,
model.phoning_err320_summary,
model.phoning_err400_summary,
model.phoning_err560_summary,
model.phoning_err1000_summary],
{model.phoning_err80: mean_mean_errors[1] if FLAGS.seq_length_out >= 2 else None,
model.phoning_err160: mean_mean_errors[3] if FLAGS.seq_length_out >= 4 else None,
model.phoning_err320: mean_mean_errors[7] if FLAGS.seq_length_out >= 8 else None,
model.phoning_err400: mean_mean_errors[9] if FLAGS.seq_length_out >= 10 else None,
model.phoning_err560: mean_mean_errors[13] if FLAGS.seq_length_out >= 14 else None,
model.phoning_err1000: mean_mean_errors[24] if FLAGS.seq_length_out >= 25 else None})
elif action == "posing":
summaries = sess.run(
[model.posing_err80_summary,
model.posing_err160_summary,
model.posing_err320_summary,
model.posing_err400_summary,
model.posing_err560_summary,
model.posing_err1000_summary],
{model.posing_err80: mean_mean_errors[1] if FLAGS.seq_length_out >= 2 else None,
model.posing_err160: mean_mean_errors[3] if FLAGS.seq_length_out >= 4 else None,
model.posing_err320: mean_mean_errors[7] if FLAGS.seq_length_out >= 8 else None,
model.posing_err400: mean_mean_errors[9] if FLAGS.seq_length_out >= 10 else None,
model.posing_err560: mean_mean_errors[13] if FLAGS.seq_length_out >= 14 else None,
model.posing_err1000: mean_mean_errors[24] if FLAGS.seq_length_out >= 25 else None})
elif action == "purchases":
summaries = sess.run(
[model.purchases_err80_summary,
model.purchases_err160_summary,
model.purchases_err320_summary,
model.purchases_err400_summary,
model.purchases_err560_summary,
model.purchases_err1000_summary],
{model.purchases_err80: mean_mean_errors[1] if FLAGS.seq_length_out >= 2 else None,
model.purchases_err160: mean_mean_errors[3] if FLAGS.seq_length_out >= 4 else None,
model.purchases_err320: mean_mean_errors[7] if FLAGS.seq_length_out >= 8 else None,
model.purchases_err400: mean_mean_errors[9] if FLAGS.seq_length_out >= 10 else None,
model.purchases_err560: mean_mean_errors[13] if FLAGS.seq_length_out >= 14 else None,
model.purchases_err1000: mean_mean_errors[24] if FLAGS.seq_length_out >= 25 else None})
elif action == "sitting":
summaries = sess.run(
[model.sitting_err80_summary,
model.sitting_err160_summary,
model.sitting_err320_summary,
model.sitting_err400_summary,
model.sitting_err560_summary,
model.sitting_err1000_summary],
{model.sitting_err80: mean_mean_errors[1] if FLAGS.seq_length_out >= 2 else None,
model.sitting_err160: mean_mean_errors[3] if FLAGS.seq_length_out >= 4 else None,
model.sitting_err320: mean_mean_errors[7] if FLAGS.seq_length_out >= 8 else None,
model.sitting_err400: mean_mean_errors[9] if FLAGS.seq_length_out >= 10 else None,
model.sitting_err560: mean_mean_errors[13] if FLAGS.seq_length_out >= 14 else None,
model.sitting_err1000: mean_mean_errors[24] if FLAGS.seq_length_out >= 25 else None})
elif action == "sittingdown":
summaries = sess.run(
[model.sittingdown_err80_summary,
model.sittingdown_err160_summary,
model.sittingdown_err320_summary,
model.sittingdown_err400_summary,
model.sittingdown_err560_summary,
model.sittingdown_err1000_summary],
{model.sittingdown_err80: mean_mean_errors[1] if FLAGS.seq_length_out >= 2 else None,
model.sittingdown_err160: mean_mean_errors[3] if FLAGS.seq_length_out >= 4 else None,
model.sittingdown_err320: mean_mean_errors[7] if FLAGS.seq_length_out >= 8 else None,
model.sittingdown_err400: mean_mean_errors[9] if FLAGS.seq_length_out >= 10 else None,
model.sittingdown_err560: mean_mean_errors[13] if FLAGS.seq_length_out >= 14 else None,
model.sittingdown_err1000: mean_mean_errors[24] if FLAGS.seq_length_out >= 25 else None})
elif action == "takingphoto":
summaries = sess.run(
[model.takingphoto_err80_summary,
model.takingphoto_err160_summary,
model.takingphoto_err320_summary,
model.takingphoto_err400_summary,
model.takingphoto_err560_summary,
model.takingphoto_err1000_summary],
{model.takingphoto_err80: mean_mean_errors[1] if FLAGS.seq_length_out >= 2 else None,
model.takingphoto_err160: mean_mean_errors[3] if FLAGS.seq_length_out >= 4 else None,
model.takingphoto_err320: mean_mean_errors[7] if FLAGS.seq_length_out >= 8 else None,
model.takingphoto_err400: mean_mean_errors[9] if FLAGS.seq_length_out >= 10 else None,
model.takingphoto_err560: mean_mean_errors[13] if FLAGS.seq_length_out >= 14 else None,
model.takingphoto_err1000: mean_mean_errors[24] if FLAGS.seq_length_out >= 25 else None})
elif action == "waiting":
summaries = sess.run(
[model.waiting_err80_summary,
model.waiting_err160_summary,
model.waiting_err320_summary,
model.waiting_err400_summary,
model.waiting_err560_summary,
model.waiting_err1000_summary],
{model.waiting_err80: mean_mean_errors[1] if FLAGS.seq_length_out >= 2 else None,
model.waiting_err160: mean_mean_errors[3] if FLAGS.seq_length_out >= 4 else None,
model.waiting_err320: mean_mean_errors[7] if FLAGS.seq_length_out >= 8 else None,
model.waiting_err400: mean_mean_errors[9] if FLAGS.seq_length_out >= 10 else None,
model.waiting_err560: mean_mean_errors[13] if FLAGS.seq_length_out >= 14 else None,
model.waiting_err1000: mean_mean_errors[24] if FLAGS.seq_length_out >= 25 else None})
elif action == "walkingdog":
summaries = sess.run(
[model.walkingdog_err80_summary,
model.walkingdog_err160_summary,
model.walkingdog_err320_summary,
model.walkingdog_err400_summary,
model.walkingdog_err560_summary,
model.walkingdog_err1000_summary],
{model.walkingdog_err80: mean_mean_errors[1] if FLAGS.seq_length_out >= 2 else None,
model.walkingdog_err160: mean_mean_errors[3] if FLAGS.seq_length_out >= 4 else None,
model.walkingdog_err320: mean_mean_errors[7] if FLAGS.seq_length_out >= 8 else None,
model.walkingdog_err400: mean_mean_errors[9] if FLAGS.seq_length_out >= 10 else None,
model.walkingdog_err560: mean_mean_errors[13] if FLAGS.seq_length_out >= 14 else None,
model.walkingdog_err1000: mean_mean_errors[24] if FLAGS.seq_length_out >= 25 else None})
elif action == "walkingtogether":
summaries = sess.run(
[model.walkingtogether_err80_summary,
model.walkingtogether_err160_summary,
model.walkingtogether_err320_summary,
model.walkingtogether_err400_summary,
model.walkingtogether_err560_summary,
model.walkingtogether_err1000_summary],
{model.walkingtogether_err80: mean_mean_errors[1] if FLAGS.seq_length_out >= 2 else None,
model.walkingtogether_err160: mean_mean_errors[3] if FLAGS.seq_length_out >= 4 else None,
model.walkingtogether_err320: mean_mean_errors[7] if FLAGS.seq_length_out >= 8 else None,
model.walkingtogether_err400: mean_mean_errors[9] if FLAGS.seq_length_out >= 10 else None,
model.walkingtogether_err560: mean_mean_errors[13] if FLAGS.seq_length_out >= 14 else None,
model.walkingtogether_err1000: mean_mean_errors[24] if FLAGS.seq_length_out >= 25 else None})
for i in np.arange(len( summaries )):
model.test_writer.add_summary(summaries[i], current_step)
print()
print("============================\n"
"Global step: %d\n"
"Learning rate: %.4f\n"
"Step-time (ms): %.4f\n"
"Train loss avg: %.4f\n"
"--------------------------\n"
"Val loss: %.4f\n"
"srnn loss: %.4f\n"
"============================" % (model.global_step.eval(),
model.learning_rate.eval(), step_time*1000, loss,
val_loss, srnn_loss))
print()
previous_losses.append(loss)
# Save the model
if current_step % FLAGS.save_every == 0:
print( "Saving the model..." ); start_time = time.time()
model.saver.save(sess, os.path.normpath(os.path.join(train_dir, 'checkpoint')), global_step=current_step )
print( "done in {0:.2f} ms".format( (time.time() - start_time)*1000) )
# Reset global time and loss
step_time, loss = 0, 0
sys.stdout.flush()
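# --- Hedged illustration (not called anywhere in this file) ---
# The Euler-angle error logged above can be summarised as a small standalone
# helper. This is only a sketch of the metric used in train()/sample(): zero
# out the global translation/rotation in the ground truth, keep the dimensions
# whose std exceeds 1e-4, and take the per-frame L2 norm over those dimensions.
def euler_error_sketch(gt_euler, pred_euler):
  """Per-frame L2 error in Euler angles over the informative dimensions.

  gt_euler, pred_euler: (n_frames, n_dims) arrays already converted from
  exponential map to Euler angles; the first 6 entries of gt_euler (global
  translation and rotation) are expected to be zeroed out by the caller.
  """
  idx_to_use = np.where(np.std(gt_euler, 0) > 1e-4)[0]
  diff = gt_euler[:, idx_to_use] - pred_euler[:, idx_to_use]
  return np.sqrt(np.sum(np.power(diff, 2), 1))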
def get_srnn_gts( actions, model, test_set, data_mean, data_std, dim_to_ignore, one_hot, to_euler=True ):
"""
Get the ground truths for srnn's sequences, and convert to Euler angles.
(the error is always computed in Euler angles).
Args
actions: a list of actions to get ground truths for.
model: training model we are using (we only use the "get_batch" method).
test_set: dictionary with normalized training data.
data_mean: d-long vector with the mean of the training data.
data_std: d-long vector with the standard deviation of the training data.
dim_to_ignore: dimensions that we are not using to train/predict.
one_hot: whether the data comes with one-hot encoding indicating action.
to_euler: whether to convert the angles to Euler format or keep them in exponential map
Returns
srnn_gts_euler: a dictionary where the keys are actions, and the values
are the ground_truth, denormalized expected outputs of srnn's seeds.
"""
srnn_gts_euler = {}
for action in actions:
srnn_gt_euler = []
_, _, srnn_expmap = model.get_batch_srnn( test_set, action )
# expmap -> rotmat -> euler
for i in np.arange( srnn_expmap.shape[0] ):
denormed = data_utils.unNormalizeData(srnn_expmap[i,:,:], data_mean, data_std, dim_to_ignore, actions, one_hot )
if to_euler:
for j in np.arange( denormed.shape[0] ):
for k in np.arange(3,97,3):
denormed[j,k:k+3] = data_utils.rotmat2euler( data_utils.expmap2rotmat( denormed[j,k:k+3] ))
srnn_gt_euler.append( denormed )
# Put back in the dictionary
srnn_gts_euler[action] = srnn_gt_euler
return srnn_gts_euler
def sample():
"""Sample predictions for srnn's seeds"""
if FLAGS.load <= 0:
raise ValueError("Must give an iteration to read parameters from")
actions = define_actions( FLAGS.action )
# Use the CPU if asked to
device_count = {"GPU": 0} if FLAGS.use_cpu else {"GPU": 1}
with tf.Session(config=tf.ConfigProto( device_count = device_count )) as sess:
# === Create the model ===
print("Creating %d layers of %d units." % (FLAGS.num_layers, FLAGS.size))
sampling = True
model = create_model(sess, actions, sampling)
print("Model created")
# Load all the data
train_set, test_set, data_mean, data_std, dim_to_ignore, dim_to_use = read_all_data(
actions, FLAGS.seq_length_in, FLAGS.seq_length_out, FLAGS.data_dir, not FLAGS.omit_one_hot )
# === Read and denormalize the gt with srnn's seeds, as we'll need them
# many times for evaluation in Euler Angles ===
srnn_gts_expmap = get_srnn_gts( actions, model, test_set, data_mean,
data_std, dim_to_ignore, not FLAGS.omit_one_hot, to_euler=False )
srnn_gts_euler = get_srnn_gts( actions, model, test_set, data_mean,
data_std, dim_to_ignore, not FLAGS.omit_one_hot )
# Clean and create a new h5 file of samples
SAMPLES_FNAME = 'samples.h5'
try:
os.remove( SAMPLES_FNAME )
except OSError:
pass
# Predict and save for each action
for action in actions:
# Make predictions with srnn's seeds
encoder_inputs, decoder_inputs, decoder_outputs = model.get_batch_srnn( test_set, action )
forward_only = True
srnn_seeds = True
srnn_loss, srnn_poses, _ = model.step(sess, encoder_inputs, decoder_inputs, decoder_outputs, forward_only, srnn_seeds)
# denormalizes too
srnn_pred_expmap = data_utils.revert_output_format( srnn_poses, data_mean, data_std, dim_to_ignore, actions, not FLAGS.omit_one_hot )
# Save the conditioning seeds
# Save the samples
with h5py.File( SAMPLES_FNAME, 'a' ) as hf:
for i in np.arange(8):
# Save conditioning ground truth
node_name = 'expmap/gt/{1}_{0}'.format(i, action)
hf.create_dataset( node_name, data=srnn_gts_expmap[action][i] )
# Save prediction
node_name = 'expmap/preds/{1}_{0}'.format(i, action)
hf.create_dataset( node_name, data=srnn_pred_expmap[i] )
# Compute and save the errors here
mean_errors = np.zeros( (len(srnn_pred_expmap), srnn_pred_expmap[0].shape[0]) )
for i in np.arange(8):
eulerchannels_pred = srnn_pred_expmap[i]
for j in np.arange( eulerchannels_pred.shape[0] ):
for k in np.arange(3,97,3):
eulerchannels_pred[j,k:k+3] = data_utils.rotmat2euler(
data_utils.expmap2rotmat( eulerchannels_pred[j,k:k+3] ))
eulerchannels_pred[:,0:6] = 0
# Pick only the dimensions with sufficient standard deviation. Others are ignored.
idx_to_use = np.where( np.std( eulerchannels_pred, 0 ) > 1e-4 )[0]
euc_error = np.power( srnn_gts_euler[action][i][:,idx_to_use] - eulerchannels_pred[:,idx_to_use], 2)
euc_error = np.sum(euc_error, 1)
euc_error = np.sqrt( euc_error )
mean_errors[i,:] = euc_error
mean_mean_errors = np.mean( mean_errors, 0 )
print( action )
print( ','.join(map(str, mean_mean_errors.tolist() )) )
with h5py.File( SAMPLES_FNAME, 'a' ) as hf:
node_name = 'mean_{0}_error'.format( action )
hf.create_dataset( node_name, data=mean_mean_errors )
return
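# --- Hedged illustration (not called anywhere in this file) ---
# sample() stores its outputs in samples.h5 under the node names used above.
# A minimal example of reading one sequence back could look like this; the
# helper name is hypothetical and only meant to document the file layout.
def load_samples_sketch(action, i, fname='samples.h5'):
  """Read one conditioning ground truth / prediction pair saved by sample()."""
  with h5py.File(fname, 'r') as hf:
    gt = hf['expmap/gt/{1}_{0}'.format(i, action)][:]
    pred = hf['expmap/preds/{1}_{0}'.format(i, action)][:]
    mean_error = hf['mean_{0}_error'.format(action)][:]
  return gt, pred, mean_error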
def define_actions( action ):
"""
Define the list of actions we are using.
Args
action: String with the passed action. Could be "all"
Returns
actions: List of strings of actions
Raises
ValueError if the action is not included in H3.6M
"""
actions = ["walking", "eating", "smoking", "discussion", "directions",
"greeting", "phoning", "posing", "purchases", "sitting",
"sittingdown", "takingphoto", "waiting", "walkingdog",
"walkingtogether"]
if action in actions:
return [action]
if action == "all":
return actions
if action == "all_srnn":
return ["walking", "eating", "smoking", "discussion"]
raise ValueError("Unrecognized action: %s" % action)
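# Example usage (illustration only):
#   define_actions("walking")  -> ["walking"]
#   define_actions("all_srnn") -> ["walking", "eating", "smoking", "discussion"]
#   define_actions("all")      -> the full list of 15 H3.6M actions above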
def read_all_data( actions, seq_length_in, seq_length_out, data_dir, one_hot ):
"""
Loads data for training/testing and normalizes it.
Args
actions: list of strings (actions) to load
seq_length_in: number of frames to use in the burn-in sequence
seq_length_out: number of frames to use in the output sequence
data_dir: directory to load the data from
one_hot: whether to use one-hot encoding per action
Returns
train_set: dictionary with normalized training data
test_set: dictionary with test data
data_mean: d-long vector with the mean of the training data
data_std: d-long vector with the standard dev of the training data
dim_to_ignore: dimensions that are not used because stdev is too small
dim_to_use: dimensions that we are actually using in the model
"""
# === Read training data ===
print ("Reading training data (seq_len_in: {0}, seq_len_out {1}).".format(
seq_length_in, seq_length_out))
train_subject_ids = [1,6,7,8,9,11]
test_subject_ids = [5]
train_set, complete_train = data_utils.load_data( data_dir, train_subject_ids, actions, one_hot )
test_set, complete_test = data_utils.load_data( data_dir, test_subject_ids, actions, one_hot )
# Compute normalization stats
data_mean, data_std, dim_to_ignore, dim_to_use = data_utils.normalization_stats(complete_train)
# Normalize -- subtract mean, divide by stdev
train_set = data_utils.normalize_data( train_set, data_mean, data_std, dim_to_use, actions, one_hot )
test_set = data_utils.normalize_data( test_set, data_mean, data_std, dim_to_use, actions, one_hot )
print("done reading data.")
return train_set, test_set, data_mean, data_std, dim_to_ignore, dim_to_use
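# Hedged sketch of the normalization applied above: data_utils.normalize_data
# is assumed to subtract data_mean and divide by data_std on the dimensions in
# dim_to_use (plus the optional one-hot action suffix), i.e. roughly
#   normalized = (frames[:, dim_to_use] - data_mean[dim_to_use]) / data_std[dim_to_use]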
def main(_):
if FLAGS.sample:
sample()
else:
train()
if __name__ == "__main__":
tf.app.run()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Juergen Brendel, Cisco Systems Inc.
# @author: Abhishek Raut, Cisco Systems Inc.
# @author: Sourabh Patwardhan, Cisco Systems Inc.
from mock import patch
from neutron.api import extensions as neutron_extensions
from neutron.api.v2 import attributes
from neutron import context
import neutron.db.api as db
from neutron.extensions import portbindings
from neutron.plugins.cisco.common import cisco_exceptions as c_exc
from neutron.plugins.cisco.db import n1kv_db_v2
from neutron.plugins.cisco.db import network_db_v2 as cdb
from neutron.plugins.cisco import extensions
from neutron.plugins.cisco.extensions import n1kv
from neutron.plugins.cisco.extensions import network_profile
from neutron.plugins.cisco.n1kv import n1kv_client
from neutron.plugins.cisco.n1kv import n1kv_neutron_plugin
from neutron.tests.unit import _test_extension_portbindings as test_bindings
from neutron.tests.unit.cisco.n1kv import fake_client
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_db_plugin as test_plugin
PHYS_NET = 'some-phys-net'
VLAN_MIN = 100
VLAN_MAX = 110
class FakeResponse(object):
"""
This object is returned by mocked httplib instead of a normal response.
Initialize it with the status code, content type and buffer contents
you wish to return.
"""
def __init__(self, status, response_text, content_type):
self.buffer = response_text
self.status = status
def __getitem__(cls, val):
return "application/xml"
def read(self, *args, **kwargs):
return self.buffer
def _fake_setup_vsm(self):
"""Fake establish Communication with Cisco Nexus1000V VSM."""
self.agent_vsm = True
self._poll_policies(event_type="port_profile")
class NetworkProfileTestExtensionManager(object):
def get_resources(self):
# Add the resources to the global attribute map
# This is done here as the setup process won't
# initialize the main API router which extends
# the global attribute map
attributes.RESOURCE_ATTRIBUTE_MAP.update(
network_profile.RESOURCE_ATTRIBUTE_MAP)
return network_profile.Network_profile.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
class N1kvPluginTestCase(test_plugin.NeutronDbPluginV2TestCase):
_plugin_name = ('neutron.plugins.cisco.n1kv.'
'n1kv_neutron_plugin.N1kvNeutronPluginV2')
tenant_id = "some_tenant"
DEFAULT_RESP_BODY = ""
DEFAULT_RESP_CODE = 200
DEFAULT_CONTENT_TYPE = ""
fmt = "json"
def _make_test_policy_profile(self, name='service_profile'):
"""
Create a policy profile record for testing purpose.
:param name: string representing the name of the policy profile to
create. Default argument value chosen to correspond to the
default name specified in config.py file.
"""
uuid = test_api_v2._uuid()
profile = {'id': uuid,
'name': name}
return n1kv_db_v2.create_policy_profile(profile)
def _make_test_profile(self,
name='default_network_profile',
segment_range='386-400'):
"""
Create a profile record for testing purposes.
:param name: string representing the name of the network profile to
create. Default argument value chosen to correspond to the
default name specified in config.py file.
:param segment_range: string representing the segment range for network
profile.
"""
db_session = db.get_session()
profile = {'name': name,
'segment_type': 'vlan',
'physical_network': PHYS_NET,
'tenant_id': self.tenant_id,
'segment_range': segment_range}
net_p = n1kv_db_v2.create_network_profile(db_session, profile)
n1kv_db_v2.sync_vlan_allocations(db_session, net_p)
return net_p
def setUp(self):
"""
Setup method for n1kv plugin tests.
First step is to define an acceptable response from the VSM to
our requests. This needs to be done BEFORE the setUp() function
of the super-class is called.
This default here works for many cases. If you need something
extra, please define your own setUp() function in your test class,
and set your DEFAULT_RESPONSE value also BEFORE calling the
setUp() of the super-function (this one here). If you have set
a value already, it will not be overwritten by this code.
"""
if not self.DEFAULT_RESP_BODY:
self.DEFAULT_RESP_BODY = (
"""<?xml version="1.0" encoding="utf-8"?>
<set name="events_set">
<instance name="1" url="/api/hyper-v/events/1">
<properties>
<cmd>configure terminal ; port-profile type vethernet grizzlyPP
(SUCCESS)
</cmd>
<id>42227269-e348-72ed-bdb7-7ce91cd1423c</id>
<time>1369223611</time>
<name>grizzlyPP</name>
</properties>
</instance>
<instance name="2" url="/api/hyper-v/events/2">
<properties>
<cmd>configure terminal ; port-profile type vethernet havanaPP
(SUCCESS)
</cmd>
<id>3fc83608-ae36-70e7-9d22-dec745623d06</id>
<time>1369223661</time>
<name>havanaPP</name>
</properties>
</instance>
</set>
""")
# Creating a mock HTTP connection object for httplib. The N1KV client
# interacts with the VSM via HTTP. Since we don't have a VSM running
# in the unit tests, we need to 'fake' it by patching the HTTP library
# itself. We install a patch for a fake HTTP connection class.
# Using __name__ to avoid having to enter the full module path.
http_patcher = patch(n1kv_client.httplib2.__name__ + ".Http")
FakeHttpConnection = http_patcher.start()
# Now define the return values for a few functions that may be called
# on any instance of the fake HTTP connection class.
instance = FakeHttpConnection.return_value
instance.getresponse.return_value = (FakeResponse(
self.DEFAULT_RESP_CODE,
self.DEFAULT_RESP_BODY,
'application/xml'))
instance.request.return_value = (instance.getresponse.return_value,
self.DEFAULT_RESP_BODY)
# Patch some internal functions in a few other parts of the system.
# These help us move along, without having to mock up even more systems
# in the background.
# Return a dummy VSM IP address
get_vsm_hosts_patcher = patch(n1kv_client.__name__ +
".Client._get_vsm_hosts")
fake_get_vsm_hosts = get_vsm_hosts_patcher.start()
fake_get_vsm_hosts.return_value = ["127.0.0.1"]
# Return dummy user profiles
get_cred_name_patcher = patch(cdb.__name__ + ".get_credential_name")
fake_get_cred_name = get_cred_name_patcher.start()
fake_get_cred_name.return_value = {"user_name": "admin",
"password": "admin_password"}
n1kv_neutron_plugin.N1kvNeutronPluginV2._setup_vsm = _fake_setup_vsm
neutron_extensions.append_api_extensions_path(extensions.__path__)
ext_mgr = NetworkProfileTestExtensionManager()
# Save the original RESOURCE_ATTRIBUTE_MAP
self.saved_attr_map = {}
for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.items():
self.saved_attr_map[resource] = attrs.copy()
# Update the RESOURCE_ATTRIBUTE_MAP with n1kv specific extended attrs.
attributes.RESOURCE_ATTRIBUTE_MAP["networks"].update(
n1kv.EXTENDED_ATTRIBUTES_2_0["networks"])
attributes.RESOURCE_ATTRIBUTE_MAP["ports"].update(
n1kv.EXTENDED_ATTRIBUTES_2_0["ports"])
self.addCleanup(self.restore_resource_attribute_map)
self.addCleanup(db.clear_db)
super(N1kvPluginTestCase, self).setUp(self._plugin_name,
ext_mgr=ext_mgr)
# Create some of the database entries that we require.
self._make_test_profile()
self._make_test_policy_profile()
def restore_resource_attribute_map(self):
# Restore the original RESOURCE_ATTRIBUTE_MAP
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
def test_plugin(self):
self._make_network('json',
'some_net',
True,
tenant_id=self.tenant_id,
set_context=True)
req = self.new_list_request('networks', params="fields=tenant_id")
req.environ['neutron.context'] = context.Context('', self.tenant_id)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
body = self.deserialize('json', res)
self.assertIn('tenant_id', body['networks'][0])
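# Hedged illustration of the setUp() contract documented above: a subclass can
# supply its own canned VSM response by assigning DEFAULT_RESP_BODY *before*
# delegating to N1kvPluginTestCase.setUp(). Kept as a comment so it is not
# collected as a test case:
#
#   class N1kvCustomResponseTestCase(N1kvPluginTestCase):
#       def setUp(self):
#           self.DEFAULT_RESP_BODY = "<?xml version='1.0'?><set name='events_set'/>"
#           super(N1kvCustomResponseTestCase, self).setUp()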
class TestN1kvNetworkProfiles(N1kvPluginTestCase):
def _prepare_net_profile_data(self, segment_type):
netp = {'network_profile': {'name': 'netp1',
'segment_type': segment_type,
'tenant_id': self.tenant_id}}
if segment_type == 'vlan':
netp['network_profile']['segment_range'] = '100-110'
netp['network_profile']['physical_network'] = PHYS_NET
elif segment_type == 'overlay':
netp['network_profile']['segment_range'] = '10000-10010'
netp['network_profile']['sub_type'] = 'enhanced' or 'native_vxlan'
netp['network_profile']['multicast_ip_range'] = ("224.1.1.1-"
"224.1.1.10")
return netp
def test_create_network_profile_vlan(self):
data = self._prepare_net_profile_data('vlan')
net_p_req = self.new_create_request('network_profiles', data)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 201)
def test_create_network_profile_overlay(self):
data = self._prepare_net_profile_data('overlay')
net_p_req = self.new_create_request('network_profiles', data)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 201)
def test_create_network_profile_overlay_unreasonable_seg_range(self):
data = self._prepare_net_profile_data('overlay')
data['network_profile']['segment_range'] = '10000-100000000001'
net_p_req = self.new_create_request('network_profiles', data)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 400)
def test_update_network_profile_plugin(self):
net_p_dict = self._prepare_net_profile_data('overlay')
net_p_req = self.new_create_request('network_profiles', net_p_dict)
net_p = self.deserialize(self.fmt,
net_p_req.get_response(self.ext_api))
data = {'network_profile': {'name': 'netp2'}}
update_req = self.new_update_request('network_profiles',
data,
net_p['network_profile']['id'])
update_res = update_req.get_response(self.ext_api)
self.assertEqual(update_res.status_int, 200)
def test_update_network_profile_physical_network_fail(self):
net_p = self._make_test_profile(name='netp1')
data = {'network_profile': {'physical_network': PHYS_NET}}
net_p_req = self.new_update_request('network_profiles',
data,
net_p['id'])
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 400)
def test_update_network_profile_segment_type_fail(self):
net_p = self._make_test_profile(name='netp1')
data = {'network_profile': {'segment_type': 'overlay'}}
net_p_req = self.new_update_request('network_profiles',
data,
net_p['id'])
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 400)
def test_update_network_profile_sub_type_fail(self):
net_p_dict = self._prepare_net_profile_data('overlay')
net_p_req = self.new_create_request('network_profiles', net_p_dict)
net_p = self.deserialize(self.fmt,
net_p_req.get_response(self.ext_api))
data = {'network_profile': {'sub_type': 'vlan'}}
update_req = self.new_update_request('network_profiles',
data,
net_p['network_profile']['id'])
update_res = update_req.get_response(self.ext_api)
self.assertEqual(update_res.status_int, 400)
def test_create_overlay_network_profile_invalid_multicast_fail(self):
net_p_dict = self._prepare_net_profile_data('overlay')
data = {'network_profile': {'sub_type': 'native_vxlan',
'multicast_ip_range': '1.1.1.1'}}
net_p_req = self.new_create_request('network_profiles', data,
net_p_dict)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 400)
def test_create_overlay_network_profile_no_multicast_fail(self):
net_p_dict = self._prepare_net_profile_data('overlay')
data = {'network_profile': {'sub_type': 'native_vxlan',
'multicast_ip_range': ''}}
net_p_req = self.new_create_request('network_profiles', data,
net_p_dict)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 400)
def test_create_overlay_network_profile_wrong_split_multicast_fail(self):
net_p_dict = self._prepare_net_profile_data('overlay')
data = {'network_profile': {
'sub_type': 'native_vxlan',
'multicast_ip_range': '224.1.1.1.224.1.1.3'}}
net_p_req = self.new_create_request('network_profiles', data,
net_p_dict)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 400)
def test_create_overlay_network_profile_invalid_minip_multicast_fail(self):
net_p_dict = self._prepare_net_profile_data('overlay')
data = {'network_profile': {
'sub_type': 'native_vxlan',
'multicast_ip_range': '10.0.0.1-224.1.1.3'}}
net_p_req = self.new_create_request('network_profiles', data,
net_p_dict)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 400)
def test_create_overlay_network_profile_invalid_maxip_multicast_fail(self):
net_p_dict = self._prepare_net_profile_data('overlay')
data = {'network_profile': {
'sub_type': 'native_vxlan',
'multicast_ip_range': '224.1.1.1-20.0.0.1'}}
net_p_req = self.new_create_request('network_profiles', data,
net_p_dict)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 400)
def test_create_overlay_network_profile_correct_multicast_pass(self):
data = self._prepare_net_profile_data('overlay')
net_p_req = self.new_create_request('network_profiles', data)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 201)
def test_create_network_profile_populate_vlan_segment_pool(self):
db_session = db.get_session()
net_p_dict = self._prepare_net_profile_data('vlan')
net_p_req = self.new_create_request('network_profiles', net_p_dict)
self.deserialize(self.fmt,
net_p_req.get_response(self.ext_api))
for vlan in range(VLAN_MIN, VLAN_MAX + 1):
self.assertIsNotNone(n1kv_db_v2.get_vlan_allocation(db_session,
PHYS_NET,
vlan))
self.assertFalse(n1kv_db_v2.get_vlan_allocation(db_session,
PHYS_NET,
vlan).allocated)
self.assertRaises(c_exc.VlanIDNotFound,
n1kv_db_v2.get_vlan_allocation,
db_session,
PHYS_NET,
VLAN_MIN - 1)
self.assertRaises(c_exc.VlanIDNotFound,
n1kv_db_v2.get_vlan_allocation,
db_session,
PHYS_NET,
VLAN_MAX + 1)
def test_delete_network_profile_deallocate_vlan_segment_pool(self):
db_session = db.get_session()
net_p_dict = self._prepare_net_profile_data('vlan')
net_p_req = self.new_create_request('network_profiles', net_p_dict)
net_p = self.deserialize(self.fmt,
net_p_req.get_response(self.ext_api))
self.assertIsNotNone(n1kv_db_v2.get_vlan_allocation(db_session,
PHYS_NET,
VLAN_MIN))
self._delete('network_profiles', net_p['network_profile']['id'])
for vlan in range(VLAN_MIN, VLAN_MAX + 1):
self.assertRaises(c_exc.VlanIDNotFound,
n1kv_db_v2.get_vlan_allocation,
db_session,
PHYS_NET,
vlan)
class TestN1kvBasicGet(test_plugin.TestBasicGet,
N1kvPluginTestCase):
pass
class TestN1kvHTTPResponse(test_plugin.TestV2HTTPResponse,
N1kvPluginTestCase):
pass
class TestN1kvPorts(test_plugin.TestPortsV2,
N1kvPluginTestCase,
test_bindings.PortBindingsTestCase):
VIF_TYPE = portbindings.VIF_TYPE_OVS
HAS_PORT_FILTER = False
def test_create_port_with_default_n1kv_policy_profile_id(self):
"""Test port create without passing policy profile id."""
with self.port() as port:
db_session = db.get_session()
pp = n1kv_db_v2.get_policy_profile(
db_session, port['port'][n1kv.PROFILE_ID])
self.assertEqual(pp['name'], 'service_profile')
def test_create_port_with_n1kv_policy_profile_id(self):
"""Test port create with policy profile id."""
profile_obj = self._make_test_policy_profile(name='test_profile')
with self.network() as network:
data = {'port': {n1kv.PROFILE_ID: profile_obj.id,
'tenant_id': self.tenant_id,
'network_id': network['network']['id']}}
port_req = self.new_create_request('ports', data)
port = self.deserialize(self.fmt,
port_req.get_response(self.api))
self.assertEqual(port['port'][n1kv.PROFILE_ID],
profile_obj.id)
self._delete('ports', port['port']['id'])
def test_update_port_with_n1kv_policy_profile_id(self):
"""Test port update failure while updating policy profile id."""
with self.port() as port:
data = {'port': {n1kv.PROFILE_ID: 'some-profile-uuid'}}
port_req = self.new_update_request('ports',
data,
port['port']['id'])
res = port_req.get_response(self.api)
# Port update should fail to update policy profile id.
self.assertEqual(res.status_int, 400)
def test_create_first_port_invalid_parameters_fail(self):
"""Test parameters for first port create sent to the VSM."""
profile_obj = self._make_test_policy_profile(name='test_profile')
with self.network() as network:
client_patch = patch(n1kv_client.__name__ + ".Client",
new=fake_client.TestClientInvalidRequest)
client_patch.start()
data = {'port': {n1kv.PROFILE_ID: profile_obj.id,
'tenant_id': self.tenant_id,
'network_id': network['network']['id'],
}}
port_req = self.new_create_request('ports', data)
res = port_req.get_response(self.api)
self.assertEqual(res.status_int, 500)
client_patch.stop()
def test_create_next_port_invalid_parameters_fail(self):
"""Test parameters for subsequent port create sent to the VSM."""
with self.port() as port:
client_patch = patch(n1kv_client.__name__ + ".Client",
new=fake_client.TestClientInvalidRequest)
client_patch.start()
data = {'port': {n1kv.PROFILE_ID: port['port']['n1kv:profile_id'],
'tenant_id': port['port']['tenant_id'],
'network_id': port['port']['network_id']}}
port_req = self.new_create_request('ports', data)
res = port_req.get_response(self.api)
self.assertEqual(res.status_int, 500)
client_patch.stop()
class TestN1kvNetworks(test_plugin.TestNetworksV2,
N1kvPluginTestCase):
def _prepare_net_data(self, net_profile_id):
return {'network': {'name': 'net1',
n1kv.PROFILE_ID: net_profile_id,
'tenant_id': self.tenant_id}}
def test_create_network_with_default_n1kv_network_profile_id(self):
"""Test network create without passing network profile id."""
with self.network() as network:
db_session = db.get_session()
np = n1kv_db_v2.get_network_profile(
db_session, network['network'][n1kv.PROFILE_ID])
self.assertEqual(np['name'], 'default_network_profile')
def test_create_network_with_n1kv_network_profile_id(self):
"""Test network create with network profile id."""
profile_obj = self._make_test_profile(name='test_profile')
data = self._prepare_net_data(profile_obj.id)
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
self.assertEqual(network['network'][n1kv.PROFILE_ID],
profile_obj.id)
def test_update_network_with_n1kv_network_profile_id(self):
"""Test network update failure while updating network profile id."""
with self.network() as network:
data = {'network': {n1kv.PROFILE_ID: 'some-profile-uuid'}}
network_req = self.new_update_request('networks',
data,
network['network']['id'])
res = network_req.get_response(self.api)
# Network update should fail to update network profile id.
self.assertEqual(res.status_int, 400)
class TestN1kvSubnets(test_plugin.TestSubnetsV2,
N1kvPluginTestCase):
pass
|
|
import os
import sys
from rpython.annotator import model as annmodel
from rpython.rlib._os_support import _WIN32, StringTraits, UnicodeTraits
from rpython.rlib.objectmodel import enforceargs
# importing rposix here creates a cycle on Windows
from rpython.rtyper.controllerentry import Controller
from rpython.rtyper.extfunc import register_external
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.translator.tool.cbuild import ExternalCompilationInfo
str0 = annmodel.s_Str0
# ____________________________________________________________
#
# Annotation support to control access to 'os.environ' in the RPython
# program
class OsEnvironController(Controller):
knowntype = os.environ.__class__
def convert(self, obj):
# 'None' is good enough, there is only one os.environ
return None
def getitem(self, obj, key):
# in the RPython program reads of 'os.environ[key]' are
# redirected here
result = r_getenv(key)
if result is None:
raise KeyError
return result
@enforceargs(None, None, str0, None)
def setitem(self, obj, key, value):
# in the RPython program, 'os.environ[key] = value' is
# redirected here
r_putenv(key, value)
def delitem(self, obj, key):
# in the RPython program, 'del os.environ[key]' is redirected
# here
absent = r_getenv(key) is None
# Always call unsetenv(), to get eventual OSErrors
r_unsetenv(key)
if absent:
raise KeyError
def get_keys(self, obj):
# 'os.environ.keys' is redirected here - note that it's the
# getattr that arrives here, not the actual method call!
return r_envkeys
def get_items(self, obj):
# 'os.environ.items' is redirected here (not the actual method
# call!)
return r_envitems
def get_get(self, obj):
# 'os.environ.get' is redirected here (not the actual method
# call!)
return r_getenv
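# Hedged illustration of the redirection above: RPython-level code such as
#
#   path = os.environ['PATH']      # getitem() -> r_getenv('PATH')
#   os.environ['FOO'] = 'bar'      # setitem() -> r_putenv('FOO', 'bar')
#   del os.environ['FOO']          # delitem() -> r_unsetenv('FOO') (KeyError if absent)
#   names = os.environ.keys()      # get_keys() resolves to the r_envkeys placeholder
#
# is routed through this controller during annotation; the r_* placeholders
# defined below are then replaced by the low-level implementations registered
# with register_external.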
# ____________________________________________________________
# Access to the 'environ' external variable
prefix = ''
if sys.platform.startswith('darwin'):
CCHARPPP = rffi.CArrayPtr(rffi.CCHARPP)
_os_NSGetEnviron = rffi.llexternal(
'_NSGetEnviron', [], CCHARPPP,
compilation_info=ExternalCompilationInfo(includes=['crt_externs.h'])
)
def os_get_environ():
return _os_NSGetEnviron()[0]
elif _WIN32:
eci = ExternalCompilationInfo(includes=['stdlib.h'])
CWCHARPP = lltype.Ptr(lltype.Array(rffi.CWCHARP, hints={'nolength': True}))
os_get_environ, _os_set_environ = rffi.CExternVariable(
rffi.CCHARPP, '_environ', eci)
get__wenviron, _set__wenviron = rffi.CExternVariable(
CWCHARPP, '_wenviron', eci, c_type='wchar_t **')
prefix = '_'
else:
os_get_environ, _os_set_environ = rffi.CExternVariable(
rffi.CCHARPP, 'environ', ExternalCompilationInfo())
# ____________________________________________________________
#
# Lower-level interface: dummy placeholders and external registrations
def r_envkeys():
just_a_placeholder
def envkeys_llimpl():
environ = os_get_environ()
result = []
i = 0
while environ[i]:
name_value = rffi.charp2str(environ[i])
p = name_value.find('=')
if p >= 0:
result.append(name_value[:p])
i += 1
return result
register_external(r_envkeys, [], [str0], # returns a list of strings
export_name='ll_os.ll_os_envkeys',
llimpl=envkeys_llimpl)
# ____________________________________________________________
def r_envitems():
just_a_placeholder
def r_getenv(name):
just_a_placeholder # should return None if name not found
def r_putenv(name, value):
just_a_placeholder
os_getenv = rffi.llexternal('getenv', [rffi.CCHARP], rffi.CCHARP,
releasegil=False)
os_putenv = rffi.llexternal(prefix + 'putenv', [rffi.CCHARP], rffi.INT,
save_err=rffi.RFFI_SAVE_ERRNO)
if _WIN32:
_wgetenv = rffi.llexternal('_wgetenv', [rffi.CWCHARP], rffi.CWCHARP,
compilation_info=eci, releasegil=False)
_wputenv = rffi.llexternal('_wputenv', [rffi.CWCHARP], rffi.INT,
compilation_info=eci,
save_err=rffi.RFFI_SAVE_LASTERROR)
class EnvKeepalive:
pass
envkeepalive = EnvKeepalive()
envkeepalive.byname = {}
envkeepalive.bywname = {}
def make_env_impls(win32=False):
if not win32:
traits = StringTraits()
get_environ, getenv, putenv = os_get_environ, os_getenv, os_putenv
byname, eq = envkeepalive.byname, '='
def last_error(msg):
from rpython.rlib import rposix
raise OSError(rposix.get_saved_errno(), msg)
else:
traits = UnicodeTraits()
get_environ, getenv, putenv = get__wenviron, _wgetenv, _wputenv
byname, eq = envkeepalive.bywname, u'='
from rpython.rlib.rwin32 import lastSavedWindowsError as last_error
def envitems_llimpl():
environ = get_environ()
result = []
i = 0
while environ[i]:
name_value = traits.charp2str(environ[i])
p = name_value.find(eq)
if p >= 0:
result.append((name_value[:p], name_value[p+1:]))
i += 1
return result
def getenv_llimpl(name):
with traits.scoped_str2charp(name) as l_name:
l_result = getenv(l_name)
return traits.charp2str(l_result) if l_result else None
def putenv_llimpl(name, value):
l_string = traits.str2charp(name + eq + value)
error = rffi.cast(lltype.Signed, putenv(l_string))
if error:
traits.free_charp(l_string)
last_error("putenv failed")
# keep 'l_string' alive - we know that the C library needs it
# until the next call to putenv() with the same 'name'.
l_oldstring = byname.get(name, lltype.nullptr(traits.CCHARP.TO))
byname[name] = l_string
if l_oldstring:
traits.free_charp(l_oldstring)
return envitems_llimpl, getenv_llimpl, putenv_llimpl
envitems_llimpl, getenv_llimpl, putenv_llimpl = make_env_impls()
register_external(r_envitems, [], [(str0, str0)],
export_name='ll_os.ll_os_envitems',
llimpl=envitems_llimpl)
register_external(r_getenv, [str0],
annmodel.SomeString(can_be_None=True, no_nul=True),
export_name='ll_os.ll_os_getenv',
llimpl=getenv_llimpl)
register_external(r_putenv, [str0, str0], annmodel.s_None,
export_name='ll_os.ll_os_putenv',
llimpl=putenv_llimpl)
# ____________________________________________________________
def r_unsetenv(name):
# default implementation for platforms without a real unsetenv()
r_putenv(name, '')
REAL_UNSETENV = False
if hasattr(__import__(os.name), 'unsetenv'):
os_unsetenv = rffi.llexternal('unsetenv', [rffi.CCHARP], rffi.INT,
save_err=rffi.RFFI_SAVE_ERRNO)
def unsetenv_llimpl(name):
with rffi.scoped_str2charp(name) as l_name:
error = rffi.cast(lltype.Signed, os_unsetenv(l_name))
if error:
from rpython.rlib import rposix
raise OSError(rposix.get_saved_errno(), "os_unsetenv failed")
try:
l_oldstring = envkeepalive.byname[name]
except KeyError:
pass
else:
del envkeepalive.byname[name]
rffi.free_charp(l_oldstring)
register_external(r_unsetenv, [str0], annmodel.s_None,
export_name='ll_os.ll_os_unsetenv',
llimpl=unsetenv_llimpl)
REAL_UNSETENV = True
|
|
#!/usr/bin/env python
#
# setup.py
# Core Provenance Library
#
# Copyright 2012
# The President and Fellows of Harvard College.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the University nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Contributor(s): Margo Seltzer, Peter Macko
#
'''
Classes and functions supporting the Python bindings of the 'Core Provenance
Library <http://code.google.com/p/core-provenance-library/>'_.
This module contains:
* The cpl class.
* The cpl_ancestor class.
* The cpl_session class.
* The cpl_session_info class.
* The cpl_object class.
* The cpl_object_info class.
* The cpl_object_version class.
* The cpl_object_version_info class.
* Helper functions to print and construct cpl objects
'''
import sys
import CPLDirect
#
# Constants
#
NONE = CPLDirect.CPL_NONE
VERSION_NONE = CPLDirect.CPL_VERSION_NONE
DEPENDENCY_CATEGORY_DATA = CPLDirect.CPL_DEPENDENCY_CATEGORY_DATA
DEPENDENCY_CATEGORY_CONTROL = CPLDirect.CPL_DEPENDENCY_CATEGORY_CONTROL
DEPENDENCY_CATEGORY_VERSION = CPLDirect.CPL_DEPENDENCY_CATEGORY_VERSION
DEPENDENCY_NONE = CPLDirect.CPL_DEPENDENCY_NONE
DATA_INPUT = CPLDirect.CPL_DATA_INPUT
DATA_GENERIC = CPLDirect.CPL_DATA_GENERIC
DATA_IPC = CPLDirect.CPL_DATA_IPC
DATA_TRANSLATION = CPLDirect.CPL_DATA_TRANSLATION
DATA_COPY = CPLDirect.CPL_DATA_COPY
CONTROL_OP = CPLDirect.CPL_CONTROL_OP
CONTROL_GENERIC = CPLDirect.CPL_CONTROL_GENERIC
CONTROL_START = CPLDirect.CPL_CONTROL_START
VERSION_PREV = CPLDirect.CPL_VERSION_PREV
VERSION_GENERIC = CPLDirect.CPL_VERSION_GENERIC
S_OK = CPLDirect.CPL_S_OK
OK = CPLDirect.CPL_OK
S_DUPLICATE_IGNORED = CPLDirect.CPL_S_DUPLICATE_IGNORED
S_NO_DATA = CPLDirect.CPL_S_NO_DATA
S_OBJECT_CREATED = CPLDirect.CPL_S_OBJECT_CREATED
E_INVALID_ARGUMENT = CPLDirect.CPL_E_INVALID_ARGUMENT
E_INSUFFICIENT_RESOURCES = CPLDirect.CPL_E_INSUFFICIENT_RESOURCES
E_DB_CONNECTION_ERROR = CPLDirect.CPL_E_DB_CONNECTION_ERROR
E_NOT_IMPLEMENTED = CPLDirect.CPL_E_NOT_IMPLEMENTED
E_ALREADY_INITIALIZED = CPLDirect.CPL_E_ALREADY_INITIALIZED
E_NOT_INITIALIZED = CPLDirect.CPL_E_NOT_INITIALIZED
E_PREPARE_STATEMENT_ERROR = CPLDirect.CPL_E_PREPARE_STATEMENT_ERROR
E_STATEMENT_ERROR = CPLDirect.CPL_E_STATEMENT_ERROR
E_INTERNAL_ERROR = CPLDirect.CPL_E_INTERNAL_ERROR
E_BACKEND_INTERNAL_ERROR = CPLDirect.CPL_E_BACKEND_INTERNAL_ERROR
E_NOT_FOUND = CPLDirect.CPL_E_NOT_FOUND
E_ALREADY_EXISTS = CPLDirect.CPL_E_ALREADY_EXISTS
E_PLATFORM_ERROR = CPLDirect.CPL_E_PLATFORM_ERROR
E_INVALID_VERSION = CPLDirect.CPL_E_INVALID_VERSION
E_DB_NULL = CPLDirect.CPL_E_DB_NULL
E_DB_KEY_NOT_FOUND = CPLDirect.CPL_E_DB_KEY_NOT_FOUND
E_DB_INVALID_TYPE = CPLDirect.CPL_E_DB_INVALID_TYPE
O_FILESYSTEM = CPLDirect.CPL_O_FILESYSTEM
O_INTERNET = CPLDirect.CPL_O_INTERNET
T_ARTIFACT = CPLDirect.CPL_T_ARTIFACT
T_FILE = CPLDirect.CPL_T_FILE
T_PROCESS = CPLDirect.CPL_T_PROCESS
T_URL = CPLDirect.CPL_T_URL
L_NO_FAIL = CPLDirect.CPL_L_NO_FAIL
I_NO_CREATION_SESSION = CPLDirect.CPL_I_NO_CREATION_SESSION
I_NO_VERSION = CPLDirect.CPL_I_NO_VERSION
I_FAST = CPLDirect.CPL_I_FAST
D_ANCESTORS = CPLDirect.CPL_D_ANCESTORS
D_DESCENDANTS = CPLDirect.CPL_D_DESCENDANTS
A_NO_PREV_NEXT_VERSION = CPLDirect.CPL_A_NO_PREV_NEXT_VERSION
A_NO_DATA_DEPENDENCIES = CPLDirect.CPL_A_NO_DATA_DEPENDENCIES
A_NO_CONTROL_DEPENDENCIES = CPLDirect.CPL_A_NO_CONTROL_DEPENDENCIES
F_LOOKUP_ONLY = 0
F_ALWAYS_CREATE = CPLDirect.CPL_F_ALWAYS_CREATE
F_CREATE_IF_DOES_NOT_EXIST = CPLDirect.CPL_F_CREATE_IF_DOES_NOT_EXIST
#
# Private constants
#
__data_dict = ['data input', 'data ipc', 'data translation', 'data copy']
__control_dict = ['control op', 'control start']
#
# Global variables
#
_cpl_connection = None
#
# Private utility functions
#
def __getSignedNumber(number, bitLength):
'''
Print out a long value as a signed bitLength-sized integer.
Thanks to:
http://stackoverflow.com/questions/1375897/how-to-get-the-signed-integer-value-of-a-long-in-python
for this function.
'''
mask = (2 ** bitLength) - 1
if number & (1 << (bitLength - 1)):
return number | ~mask
else:
return number & mask
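# Worked example (illustration only): with bitLength = 8,
#   __getSignedNumber(0xFF, 8) -> -1   (sign bit set, value is sign-extended)
#   __getSignedNumber(0x7F, 8) -> 127  (sign bit clear, value is simply masked)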
#
# CPLDirect enhancements
#
def __cpl_id_t__eq__(self, other):
'''
Compare this and another ID, and return true if they are equal
'''
return self.lo == other.lo and self.hi == other.hi
def __cpl_id_t__ne__(self, other):
'''
Compare this and another ID, and return true if they are not equal
'''
return self.lo != other.lo or self.hi != other.hi
def __cpl_id_t__str__(self):
'''
Create and return a string representation of this object
'''
return "%x:%x" % (self.hi, self.lo)
CPLDirect.cpl_id_t.__eq__ = __cpl_id_t__eq__
CPLDirect.cpl_id_t.__ne__ = __cpl_id_t__ne__
CPLDirect.cpl_id_t.__str__ = __cpl_id_t__str__
#
# Public utility functions
#
def current_connection():
'''
Return the current CPL connection object, or None if not connected
'''
global _cpl_connection
return _cpl_connection
def dependency_type_to_str(val):
'''
Given a dependency (edge) type, convert it to a string
Method calls::
strval = dependency_type_to_str(val)
'''
which = val >> 8
if which == DEPENDENCY_CATEGORY_DATA:
if (val & 7) < len(__data_dict):
return __data_dict[val & 7]
else:
return 'data unknown'
elif which == DEPENDENCY_CATEGORY_CONTROL:
if (val & 7) < len(__control_dict):
return __control_dict[val & 7]
else:
return 'control unknown'
elif which == DEPENDENCY_CATEGORY_VERSION:
return 'version'
else:
return 'unknown'
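# Hedged example: the category lives in the bits above the low byte (val >> 8)
# and the sub-type in the low three bits, so a value whose category is
# DEPENDENCY_CATEGORY_DATA and whose sub-type is 0 (assumed to be DATA_INPUT)
# maps to 'data input', while any value in DEPENDENCY_CATEGORY_VERSION maps to
# 'version' regardless of its low bits.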
def copy_id(idp):
'''
Construct a cpl identifier type consisting of the hi and lo values.
Method calls::
id = copy_id(idp)
'''
i = CPLDirect.cpl_id_t()
i.hi = idp.hi
i.lo = idp.lo
return i
def p_id(id, with_newline = False):
'''
Print hi and lo fields of a CPL id, optionally with newline after it.
Method calls::
p_id(id, with_newline = False)
'''
sys.stdout.write('id: ' + str(id))
if with_newline:
sys.stdout.write('\n')
def p_object(obj, with_session = False):
'''
Print information about an object
Method calls:
p_object(obj, with_session = False)
'''
i = obj.info()
p_id(i.object.id)
print(' version: ' + str(i.version))
sys.stdout.write('container_id: ')
if i.container is not None:
p_id(i.container.object.id)
print(' container version: ' + str(i.container.version))
else:
sys.stdout.write('none')
print(' container version: none')
print('originator: ' + i.originator + ' name:' + i.name +
' type: ' + i.type)
if with_session:
print('creation_time: ' + str(i.creation_time))
p_session(i.creation_session)
def p_object_version(obj_ver, with_session = False):
'''
Print information about a version of an object
Method calls:
p_object_version(obj_ver, with_session = False)
'''
print(str(obj_ver))
i = obj_ver.info()
if with_session:
print('creation_time: ' + str(i.creation_time))
p_session(i.session)
def p_session(session):
'''
Print information about a session
Method calls:
p_session(session)
'''
si = session.info()
sys.stdout.write('session ')
p_id(si.session.id, with_newline = True)
print(' mac_address: ' + si.mac_address + ' pid: ' + str(si.pid))
print('\t(' + str(si.start_time) + ')' + ' user: ' +
si.user + ' cmdline: ' + si.cmdline + ' program: ' + si.program)
#
# Information about a specific version of a provenance object
#
class cpl_object_version_info:
'''
Information about a specific version of a provenance object
'''
def __init__(self, object_version, session, creation_time):
'''
Create an instance of this object
'''
self.object_version = object_version
self.session = session
self.creation_time = creation_time
#
# Object & version
#
class cpl_object_version:
'''
Stores a reference to a provenance object and a version number
'''
def __init__(self, object, version):
'''
Create an instance of this object
'''
self.object = object
self.version = version
def __eq__(self, other):
'''
Compare this and the other object, and return true if they are equal
'''
return self.object.id==other.object.id and self.version==other.version
def __ne__(self, other):
'''
Compare this and the other object, and return true if they are not equal
'''
return self.object.id!=other.object.id or self.version!=other.version
def __str__(self):
'''
Create and return a human-readable string representation of this object
'''
return str(self.object) + '-' + str(self.version)
def info(self):
'''
Return the corresponding cpl_object_version_info for this specific
version of the object.
'''
infopp = CPLDirect.new_cpl_version_info_tpp()
ret = CPLDirect.cpl_get_version_info(self.object.id, self.version,
CPLDirect.cpl_convert_pp_cpl_version_info_t(infopp))
if not CPLDirect.cpl_is_ok(ret):
CPLDirect.delete_cpl_version_info_tpp(infopp)
raise Exception('Unable to get object version info: ' +
CPLDirect.cpl_error_string(ret))
op = CPLDirect.cpl_dereference_pp_cpl_version_info_t(infopp)
info = CPLDirect.cpl_version_info_tp_value(op)
_info = cpl_object_version_info(self, cpl_session(info.session),
info.creation_time)
CPLDirect.cpl_free_version_info(op)
CPLDirect.delete_cpl_version_info_tpp(infopp)
return _info
def control_flow_to(self, dest, type=CONTROL_OP):
'''
Add a control flow edge of type from self to dest.
'''
return self.object.control_flow_to(dest, type, self.version)
def data_flow_to(self, dest, type=DATA_INPUT):
'''
Add a data flow edge of type from self to dest.
'''
return self.object.data_flow_to(dest, type, self.version)
#
# Provenance ancestry entry
#
class cpl_ancestor:
'''
Stores the same data as a cpl_ancestry_entry_t, but in a Python
class that we manage.
'''
def __init__(self, aid, aversion, did, dversion, type, direction):
'''
Create an instance of cpl_ancestor
'''
self.ancestor = cpl_object_version(cpl_object(aid), aversion)
self.descendant = cpl_object_version(cpl_object(did), dversion)
self.type = type
if direction == D_ANCESTORS:
self.base = self.descendant
self.other = self.ancestor
else:
self.base = self.ancestor
self.other = self.descendant
def __str__(self):
'''
Create a printable string representation of this object
'''
arrow = ' -- '
if self.other == self.ancestor:
arrow = ' --> '
else:
arrow = ' <-- '
return (str(self.base) + arrow + str(self.other) +
' type:' + dependency_type_to_str(self.type))
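# Hedged example of the string form above: for an entry obtained by walking
# D_ANCESTORS, base is the descendant and other is the ancestor, so str(entry)
# reads roughly '<descendant>-<version> --> <ancestor>-<version> type:data input';
# the reverse arrow '<--' is used when walking D_DESCENDANTS.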
#
# CPL Connection
#
class cpl_connection:
'''
Core provenance library connection -- maintains state for the current
session and the current database backend.
'''
def __init__(self, cstring="DSN=CPL;"):
'''
Constructor for CPL connection.
** Parameters **
** cstring **
Connection string for database backend
** Note **
Currently the python bindings support only ODBC connection.
RDF connector coming soon.
'''
global _cpl_connection
self.connection_string = cstring
self.closed = False
def get_current_session():
idp = CPLDirect.new_cpl_id_tp()
ret = CPLDirect.cpl_get_current_session(idp)
if not CPLDirect.cpl_is_ok(ret):
CPLDirect.delete_cpl_id_tp(idp)
raise Exception("Could not get current session" +
CPLDirect.cpl_error_string(ret))
s = CPLDirect.cpl_id_tp_value(idp)
i = copy_id(s)
CPLDirect.delete_cpl_id_tp(idp)
return i
backend = CPLDirect.new_cpl_db_backend_tpp()
ret = CPLDirect.cpl_create_odbc_backend(cstring,
CPLDirect.CPL_ODBC_GENERIC, backend)
if not CPLDirect.cpl_is_ok(ret):
raise Exception("Could not create ODBC connection" +
CPLDirect.cpl_error_string(ret))
self.db = CPLDirect.cpl_dereference_pp_cpl_db_backend_t(backend)
ret = CPLDirect.cpl_attach(self.db)
CPLDirect.delete_cpl_db_backend_tpp(backend)
if not CPLDirect.cpl_is_ok(ret):
raise Exception("Could not open ODBC connection" +
CPLDirect.cpl_error_string(ret))
self.session = cpl_session(get_current_session())
_cpl_connection = self
def __del__(self):
'''
Destructor - automatically closes the connection.
'''
if self == _cpl_connection and not self.closed:
self.close()
def __create_or_lookup_cpl_object(self, originator,
name, type, create=None, container=None):
'''
Create or lookup a CPL object
** Parameters **
originator
name: originator-local name
type: originator-local type
create:
None: lookup or create
True: create only
False: lookup only
container:
Id of container into which to place this object.
Only applies to create
'''
if container is None:
container_id = NONE
else:
container_id = container.id
idp = CPLDirect.new_cpl_id_tp()
if create == None:
ret = CPLDirect.cpl_lookup_or_create_object(originator, name,
type, container_id, idp)
if ret == S_OBJECT_CREATED:
ret = S_OK
elif create:
ret = CPLDirect.cpl_create_object(originator,
name, type, container_id, idp)
else:
ret = CPLDirect.cpl_lookup_object(originator, name, type, idp)
if ret == E_NOT_FOUND:
CPLDirect.delete_cpl_id_tp(idp)
raise LookupError('Not found')
if not CPLDirect.cpl_is_ok(ret):
CPLDirect.delete_cpl_id_tp(idp)
raise Exception('Could not find or create' +
' provenance object: ' + CPLDirect.cpl_error_string(ret))
r = cpl_object(idp)
CPLDirect.delete_cpl_id_tp(idp)
return r
def get_all_objects(self, fast=False):
'''
Return all objects in the provenance database. If fast = True, then
fetch only incomplete information about each object, so that it is
faster.
'''
if fast:
flags = CPLDirect.CPL_I_FAST
else:
flags = 0
vp = CPLDirect.new_std_vector_cplxx_object_info_tp()
ret = CPLDirect.cpl_get_all_objects(flags,
CPLDirect.cpl_cb_collect_object_info_vector, vp)
if not CPLDirect.cpl_is_ok(ret):
CPLDirect.delete_std_vector_cplxx_object_info_tp(vp)
raise Exception('Unable to get all objects: ' +
CPLDirect.cpl_error_string(ret))
v = CPLDirect.cpl_dereference_p_std_vector_cplxx_object_info_t(vp)
l = []
if v != S_NO_DATA :
for e in v:
if e.container_id == NONE or e.container_version < 0:
container = None
else:
container = cpl_object_version(cpl_object(e.container_id),
e.container_version)
if e.creation_session == NONE:
creation_session = None
else:
creation_session = cpl_session(e.creation_session)
l.append(cpl_object_info(cpl_object(e.id), e.version,
creation_session, e.creation_time, e.originator, e.name,
e.type, container))
CPLDirect.delete_std_vector_cplxx_object_info_tp(vp)
return l
def get_object(self, originator, name, type, container=None):
'''
Get the object, with the designated originator (string),
name (string), and type (string), creating it if necessary.
If you want an object in a specific container, set the container
parameter to the ID of the object in which you want this object
created.
'''
return self.__create_or_lookup_cpl_object(originator, name, type,
create=None, container=container)
def create_object(self, originator, name, type, container=None):
'''
Create object, returns None if object already exists.
'''
return self.__create_or_lookup_cpl_object(originator, name, type,
create=True, container=container)
def lookup_object(self, originator, name, type):
'''
Look up object; raise LookupError if the object does not exist.
'''
o = self.__create_or_lookup_cpl_object(originator, name, type,
create=False)
return o
def try_lookup_object(self, originator, name, type):
'''
Look up object; returns None if the object does not exist.
'''
try:
o = self.__create_or_lookup_cpl_object(originator, name, type,
create=False)
except LookupError:
o = None
return o
def lookup_by_property(self, key, value):
'''
Return all objects that have the key/value property specified; raise
LookupError if no such object is found.
'''
vp = CPLDirect.new_std_vector_cpl_id_version_tp()
ret = CPLDirect.cpl_lookup_by_property(key, value,
CPLDirect.cpl_cb_collect_property_lookup_vector, vp)
if ret == E_NOT_FOUND:
CPLDirect.delete_std_vector_cpl_id_version_tp(vp)
raise LookupError('Not found')
if not CPLDirect.cpl_is_ok(ret):
CPLDirect.delete_std_vector_cpl_id_version_tp(vp)
raise Exception('Unable to lookup by property ' +
CPLDirect.cpl_error_string(ret))
v = CPLDirect.cpl_dereference_p_std_vector_cpl_id_version_t(vp)
l = []
for e in v:
l.append(cpl_object_version(cpl_object(e.id), e.version))
CPLDirect.delete_std_vector_cpl_id_version_tp(vp)
return l
def try_lookup_by_property(self, key, value):
'''
Return all objects that have the key/value property specified, but do
not fail if no such object is found -- return an empty list instead.
'''
try:
o = self.lookup_by_property(key, value)
except LookupError:
o = []
return o
def lookup_all(self, originator, name, type):
'''
Return all objects that have the specified originator, name,
and type (they might differ by container).
'''
vp = CPLDirect.new_std_vector_cpl_id_timestamp_tp()
ret = CPLDirect.cpl_lookup_object_ext(originator, name, type,
L_NO_FAIL, CPLDirect.cpl_cb_collect_id_timestamp_vector, vp)
if not CPLDirect.cpl_is_ok(ret):
CPLDirect.delete_std_vector_cpl_id_timestamp_tp(vp)
raise Exception('Unable to lookup all objects: ' +
CPLDirect.cpl_error_string(ret))
v = CPLDirect.cpl_dereference_p_std_vector_cpl_id_timestamp_t(vp)
l = []
if v != S_NO_DATA :
for e in v:
l.append(cpl_object(e.id))
CPLDirect.delete_std_vector_cpl_id_timestamp_tp(vp)
return l
def get_object_for_file(self, file_name, mode=F_CREATE_IF_DOES_NOT_EXIST):
'''
Get or create (depending on the value of mode) a provenance object
that corresponds to the given file on the file system. The file must
already exist.
Please note that the CPL internally refers to the files using their
        full path, so if you move the file with a utility that is not CPL-aware,
        a subsequent call to this function with the same file (after it has
        been moved or renamed) will not return the same
        provenance object. Furthermore, beware that if you use hard links,
you will get different provenance objects for different names/paths
of the file.
The mode can be one of the following values:
* F_LOOKUP_ONLY: Perform only the lookup -- do not create the
          corresponding provenance object if it does not already exist.
* F_CREATE_IF_DOES_NOT_EXIST: Create the corresponding provenance
object if it does not already exist (this is the default).
* F_ALWAYS_CREATE: Always create a new corresponding provenance
object, even if it already exists. Use this if you completely
overwrite the file.
'''
idp = CPLDirect.new_cpl_id_tp()
vp = CPLDirect.new_cpl_version_tp()
ret = CPLDirect.cpl_lookup_file(file_name, mode, idp, vp)
if not CPLDirect.cpl_is_ok(ret):
raise Exception('Could not find or create provenance object' +
' for a file: ' + CPLDirect.cpl_error_string(ret))
r = cpl_object(idp)
CPLDirect.delete_cpl_id_tp(idp)
CPLDirect.delete_cpl_version_tp(vp)
return r
def create_object_for_file(self, file_name):
'''
Create a provenance object that corresponds to the specified file.
This function is equivalent to calling get_object_for_file() with
mode = F_ALWAYS_CREATE.
'''
return self.get_object_for_file(file_name, F_ALWAYS_CREATE)
def close(self):
'''
Close database connection and session
'''
global _cpl_connection
if self != _cpl_connection or self.closed:
return
ret = CPLDirect.cpl_detach()
if not CPLDirect.cpl_is_ok(ret):
raise Exception('Could not detach ' +
CPLDirect.cpl_error_string(ret))
_cpl_connection = None
self.closed = True
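#
# A minimal usage sketch (not part of the original bindings; the originator,
# name, and type strings and the file path below are hypothetical placeholders):
#
#   conn = cpl_connection()                      # attach using the default DSN
#   obj = conn.get_object('com.example.app', 'config.xml', 'file')
#   fobj = conn.get_object_for_file('/tmp/data.csv')   # file must already exist
#   for info in conn.get_all_objects(fast=True):
#       print(info.name, info.type)
#   conn.close()
#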
#
# Information about a provenance session
#
class cpl_session_info:
'''
Information about a provenance session
'''
def __init__(self, session, mac_address, user, pid, program, cmdline,
start_time):
'''
Create an instance of this object
'''
self.session = session
self.mac_address = mac_address
self.user = user
self.pid = pid
self.program = program
self.cmdline = cmdline
self.start_time = start_time
#
# CPL Session
#
class cpl_session:
'''
CPL Session
'''
def __init__(self, id):
'''
Initialize an instance of cpl_session
'''
self.id = copy_id(id)
def __eq__(self, other):
'''
Compare this and the other object and return true if they are equal
'''
return self.id == other.id
def __ne__(self, other):
'''
Compare this and the other object and return true if they are not equal
'''
return self.id != other.id
def __str__(self):
'''
Return a string representation of this object
'''
return str(self.id)
def info(self):
'''
Return the cpl_session_info object associated with this session.
'''
sessionpp = CPLDirect.new_cpl_session_info_tpp()
ret = CPLDirect.cpl_get_session_info(self.id,
CPLDirect.cpl_convert_pp_cpl_session_info_t(sessionpp))
if not CPLDirect.cpl_is_ok(ret):
CPLDirect.delete_cpl_session_info_tpp(sessionpp)
raise Exception('Could not find session information: ' +
CPLDirect.cpl_error_string(ret))
sessionp = CPLDirect.cpl_dereference_pp_cpl_session_info_t(sessionpp)
info = CPLDirect.cpl_session_info_tp_value(sessionp)
_info = cpl_session_info(self, info.mac_address, info.user,
info.pid, info.program, info.cmdline, info.start_time)
CPLDirect.cpl_free_session_info(sessionp)
CPLDirect.delete_cpl_session_info_tpp(sessionpp)
return _info
#
# Information about a provenance object
#
class cpl_object_info:
'''
Information about a provenance object
'''
def __init__(self, object, version, creation_session, creation_time,
originator, name, type, container):
'''
Create an instance of this object
'''
self.object = object
self.version = version
self.creation_session = creation_session
self.creation_time = creation_time
self.originator = originator
self.name = name
self.type = type
self.container = container
#
# CPL Provenance object
#
class cpl_object:
'''
CPL Provenance object
'''
def __init__(self, id):
'''
Create a new instance of a provenance object from its internal ID
'''
self.id = copy_id(id)
def __eq__(self, other):
'''
Compare this and the other object and return true if they are equal
'''
return self.id == other.id
def __ne__(self, other):
'''
Compare this and the other object and return true if they are not equal
'''
return self.id != other.id
def __str__(self):
'''
Return a string representation of this object
'''
return str(self.id)
def version(self):
'''
Determine the current version of this provenance object
'''
vp = CPLDirect.new_cpl_version_tp()
ret = CPLDirect.cpl_get_version(self.id, vp)
if not CPLDirect.cpl_is_ok(ret):
CPLDirect.delete_cpl_version_tp(vp)
raise Exception('Could not determine the version of an object: ' +
CPLDirect.cpl_error_string(ret))
v = CPLDirect.cpl_version_tp_value(vp)
CPLDirect.delete_cpl_version_tp(vp)
return v
def new_version(self):
'''
Create a new version of this object and return the new version.
'''
vp = CPLDirect.new_cpl_version_tp()
ret = CPLDirect.cpl_new_version(self.id, vp)
if not CPLDirect.cpl_is_ok(ret):
CPLDirect.delete_cpl_version_tp(vp)
            raise Exception('Could not create a new version of an object: ' +
CPLDirect.cpl_error_string(ret))
v = CPLDirect.cpl_version_tp_value(vp)
CPLDirect.delete_cpl_version_tp(vp)
return v
def current_version(self):
'''
Get a cpl_object_version object for the current version.
'''
return cpl_object_version(self, self.version())
def specific_version(self, version):
'''
Get a cpl_object_version object for the specified version. Note that
the specified version number does not get validated until info() is
called.
'''
return cpl_object_version(self, version)
def control_flow_to(self, dest, type=CONTROL_OP, version=None):
'''
Add control flow edge of type from self to dest. If version
is specified, then add flow to dest with explicit version,
else add to most recent version.
Allowed types:
CPL.CONTROL_OP (default)
CPL.CONTROL_START
CPL.CONTROL_GENERIC is an alias for CPL.CONTROL_OP.
'''
if version is None or version == VERSION_NONE:
version = self.version()
ret = CPLDirect.cpl_control_flow_ext(dest.id, self.id, version, type)
if not CPLDirect.cpl_is_ok(ret):
raise Exception('Could not add control dependency: ' +
CPLDirect.cpl_error_string(ret))
return not ret == S_DUPLICATE_IGNORED
def data_flow_to(self, dest, type=DATA_INPUT, version=None):
'''
Add data flow edge of type from self to dest. If version
is specified, then add flow to dest with explicit version,
else add to most recent version.
Allowed types:
CPL.DATA_INPUT (default)
CPL.DATA_IPC
CPL.DATA_TRANSLATION
CPL.DATA_COPY
CPL.DATA_GENERIC is an alias for CPL.DATA_INPUT.
'''
if version is None or version == VERSION_NONE:
version = self.version()
ret = CPLDirect.cpl_data_flow_ext(dest.id, self.id, version, type)
if not CPLDirect.cpl_is_ok(ret):
raise Exception('Could not add data dependency ' +
CPLDirect.cpl_error_string(ret))
return not ret == S_DUPLICATE_IGNORED
def control_flow_from(self, src, type=CONTROL_OP, version=None):
'''
Add control flow edge of the given type from src to self. If version
is specified, then add flow to dest with explicit version, else add
to most recent version.
Allowed types:
CPL.CONTROL_OP (default)
CPL.CONTROL_START
CPL.CONTROL_GENERIC is an alias for CPL.CONTROL_OP.
'''
if isinstance(src, cpl_object_version):
if version is not None and version != VERSION_NONE:
raise Exception('The version argument must be None if ' +
'src is of type cpl_object_version')
_version = src.version
_src = src.object
elif version is None or version == VERSION_NONE:
_version = src.version()
_src = src
else:
_version = version
_src = src
ret = CPLDirect.cpl_control_flow_ext(self.id, _src.id, _version, type)
if not CPLDirect.cpl_is_ok(ret):
raise Exception('Could not add control dependency: ' +
CPLDirect.cpl_error_string(ret))
return not ret == S_DUPLICATE_IGNORED
def data_flow_from(self, src, type=DATA_INPUT, version=None):
'''
Add data flow edge of the given type from src to self. If version
is specified, then add flow to dest with explicit version, else add
to most recent version.
Allowed types:
CPL.DATA_INPUT (default)
CPL.DATA_IPC
CPL.DATA_TRANSLATION
CPL.DATA_COPY
CPL.DATA_GENERIC is an alias for CPL.DATA_INPUT.
'''
if isinstance(src, cpl_object_version):
if version is not None and version != VERSION_NONE:
raise Exception('The version argument must be None if ' +
'src is of type cpl_object_version')
_version = src.version
_src = src.object
elif version is None or version == VERSION_NONE:
_version = src.version()
_src = src
else:
_version = version
_src = src
ret = CPLDirect.cpl_data_flow_ext(self.id, _src.id, _version, type)
if not CPLDirect.cpl_is_ok(ret):
raise Exception('Could not add data dependency ' +
CPLDirect.cpl_error_string(ret))
return not ret == S_DUPLICATE_IGNORED
def has_ancestor(self, other):
'''
Return True if the other object is an ancestor of the object.
'''
ancestors = self.ancestry()
for a in ancestors:
if a.ancestor.object == other:
return True
return False
def add_property(self, name, value):
'''
Add name/value pair as a property to current object.
'''
return CPLDirect.cpl_add_property(self.id, name, value)
def info(self):
'''
Return cpl_object_info_t corresponding to the current object.
'''
objectpp = CPLDirect.new_cpl_object_info_tpp()
ret = CPLDirect.cpl_get_object_info(self.id,
CPLDirect.cpl_convert_pp_cpl_object_info_t(objectpp))
if not CPLDirect.cpl_is_ok(ret):
CPLDirect.delete_cpl_object_info_tpp(objectpp)
raise Exception('Unable to get object info: ' +
CPLDirect.cpl_error_string(ret))
op = CPLDirect.cpl_dereference_pp_cpl_object_info_t(objectpp)
object = CPLDirect.cpl_object_info_tp_value(op)
if object.container_id == NONE or object.container_version < 0:
container = None
else:
container = cpl_object_version(cpl_object(object.container_id),
object.container_version)
_info = cpl_object_info(self, object.version,
cpl_session(object.creation_session), object.creation_time,
object.originator, object.name, object.type, container)
CPLDirect.cpl_free_object_info(op)
CPLDirect.delete_cpl_object_info_tpp(objectpp)
return _info
def ancestry(self, version=None, direction=D_ANCESTORS, flags=0):
'''
Return a list of cpl_ancestor objects
'''
if version is None:
version = VERSION_NONE
vp = CPLDirect.new_std_vector_cpl_ancestry_entry_tp()
ret = CPLDirect.cpl_get_object_ancestry(self.id, version,
direction, flags, CPLDirect.cpl_cb_collect_ancestry_vector, vp)
if not CPLDirect.cpl_is_ok(ret):
CPLDirect.delete_std_vector_cpl_ancestry_entry_tp(vp)
raise Exception('Error retrieving ancestry: ' +
CPLDirect.cpl_error_string(ret))
v = CPLDirect.cpl_dereference_p_std_vector_cpl_ancestry_entry_t(vp)
l = []
if direction == D_ANCESTORS:
for entry in v:
a = cpl_ancestor(entry.other_object_id,
entry.other_object_version,
entry.query_object_id,
entry.query_object_version, entry.type, direction)
l.append(a)
else:
for entry in v:
a = cpl_ancestor(entry.query_object_id,
entry.query_object_version,
entry.other_object_id,
entry.other_object_version, entry.type, direction)
l.append(a)
CPLDirect.delete_std_vector_cpl_ancestry_entry_tp(vp)
return l
def properties(self, key=None, version=None):
'''
Return all the properties associated with the current object.
If key is set to something other than None, return only those
properties matching key.
By default, returns properties for the current version of
the object, but if version is set to a value other than
CPL.VERSION_NONE, then will return properties for that version.
'''
if version is None:
version = VERSION_NONE
vp = CPLDirect.new_std_vector_cplxx_property_entry_tp()
ret = CPLDirect.cpl_get_properties(self.id, version,
key, CPLDirect.cpl_cb_collect_properties_vector, vp)
if not CPLDirect.cpl_is_ok(ret):
CPLDirect.delete_std_vector_cplxx_property_entry_tp(vp)
raise Exception('Error retrieving properties: ' +
CPLDirect.cpl_error_string(ret))
v = CPLDirect.cpl_dereference_p_std_vector_cplxx_property_entry_t(vp)
l = []
for e in v:
l.append([e.key, e.value])
CPLDirect.delete_std_vector_cplxx_property_entry_tp(vp)
return l
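#
# A short sketch (assumed object names, not from the original module) of how
# provenance edges and properties are attached once a connection exists:
#
#   src = conn.get_object('com.example.app', 'input.csv', 'file')
#   proc = conn.get_object('com.example.app', 'cleaner', 'process')
#   proc.data_flow_from(src, type=DATA_INPUT)    # src is now an ancestor of proc
#   proc.add_property('exit_code', '0')
#   print(proc.properties())                     # -> [['exit_code', '0'], ...]
#   print(proc.has_ancestor(src))                # -> True
#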
|
|
""" Module for running pPXF analyses"""
from pkg_resources import resource_filename
import numpy as np
from matplotlib import pyplot as plt
#from goodies import closest
from astropy.cosmology import Planck15 as cosmo
from astropy import constants
from astropy import units
from astropy.table import Table
c = constants.c.to(units.km / units.s).value
from linetools.spectra.xspectrum1d import XSpectrum1D
from linetools.spectra.io import readspec
from ppxf import ppxf
from ppxf import ppxf_util as util
from ppxf import miles_util as lib
from time import perf_counter as clock  # time.clock() was removed in Python 3.8
from IPython import embed
def run(spec_file, R, zgal, results_file=None, spec_fit='tmp.fits', chk=True,
flux_scale=1., atmos=[], gaps=[], wvmnx=(0.,1e9)):
"""
Wrapper for running and handling outputs
Outputs are written to disk
Args:
        spec_file (str or XSpectrum1D): Spectrum file name or spectrum object
        R (float): Spectral resolution of the data
        zgal (float): Redshift of the galaxy
        results_file (str, optional): Output file for the fit results (ECSV)
        spec_fit (str, optional): Output file for the best-fit spectrum
        chk (bool, optional): If True, show diagnostic plots
        flux_scale (float, optional): Multiplicative factor applied to the flux
atmos (list of tuple):
List of (wvmin,wvmax) regions to mask during the analysis
This is a list of lists, e.g. [[7150., 7300.]]
gaps (list):
Regions to ignore due to detector gaps or any other bad regions
This is a list of lists, e.g. [[6675., 6725.]]
        wvmnx (tuple, optional): (min, max) observed wavelengths to keep in the fit
"""
# Init
# Load spectrum
if isinstance(spec_file, XSpectrum1D):
spec = spec_file
else:
spec = readspec(spec_file)
if chk:
spec.plot()
# Rebin
wave = spec.wavelength.value
diff = wave[1:] - wave[0:-1]
meddiff = np.median(diff)
print(meddiff)
newwave = np.arange(wave[0], wave[-2], meddiff) * units.angstrom
newspec = spec.rebin(newwave, do_sig=True, grow_bad_sig=True)
# Scale to MUSE flux units
newspec = XSpectrum1D.from_tuple((newspec.wavelength.value,
newspec.flux * flux_scale,
newspec.sig * flux_scale))
# Mask
wave = newspec.wavelength.value
goodpixels = np.where((wave >= wvmnx[0]) & (wave <= wvmnx[1]))[0]
    # Combine the masked regions; copy so the mutable default arguments
    # (atmos=[], gaps=[]) are not modified in place across calls
    mask_lam = list(atmos) + list(gaps)
for i, mrange in enumerate(mask_lam):
theseidxs = np.where((wave < mrange[0]) | (wave > mrange[1]))[0]
goodpixels = np.intersect1d(theseidxs, goodpixels)
goodpixels = np.unique(goodpixels)
goodpixels.sort()
if chk:
plt.clf()
plt.plot(newspec.wavelength[goodpixels], newspec.flux[goodpixels])
plt.show()
# Run it
ppfit, miles, star_weights = fit_spectrum(newspec, zgal, R, degree_mult=0, degree_add=3,
goodpixels=goodpixels, reddening=1., rebin=False)
# Age
age, metals = miles.mean_age_metal(star_weights)
print('Age = {} Gyr'.format(age))
print('Metals = {}'.format(metals))
# Mass -- This is a bit approximate as Dwv is a guess for now
actualflux = ppfit.bestfit * constants.L_sun.cgs / units.angstrom / (
4 * np.pi * (cosmo.luminosity_distance(zgal).to(units.cm)) ** 2 / (1 + zgal))
# When fitting, the routine thought our data and model spectra had same units...
Dwv = 1700. # Ang, width of the band pass
scfactor = np.median(ppfit.bestfit * (units.erg / units.s / units.cm ** 2 / units.angstrom) / actualflux) * Dwv
# To get the actual model mass required to fit spectrum, scale by this ratio
massmodels = scfactor * miles.total_mass(star_weights)
print('log10 M* = {}'.format(np.log10(massmodels)))
# Reddening
print('E(B-V) = {}'.format(ppfit.reddening))
# Write?
if results_file is not None:
dump_ppxf_results(ppfit, miles, zgal, results_file)
if spec_fit is not None:
bestfit = dump_bestfit(ppfit, outfile=spec_fit, z=zgal)
# Final check
if chk:
bestfit = dump_bestfit(ppfit, z=zgal)
plt.clf()
plt.plot(newspec.wavelength, newspec.flux)
plt.plot(bestfit.wavelength, bestfit.flux)
plt.show()
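# A hypothetical call to the wrapper above (file names, resolution and redshift
# are placeholders; masked regions follow the list-of-lists convention noted in
# the docstring):
#
#   run('galaxy_spectrum.fits', R=2000., zgal=0.152,
#       results_file='ppxf_results.ecsv', spec_fit='ppxf_bestfit.fits',
#       chk=False, atmos=[[7150., 7300.]], gaps=[[6675., 6725.]],
#       wvmnx=(4800., 9000.))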
def fit_spectrum(spec, zgal, specresolution, tie_balmer=False,
miles_dir=None, rebin=True,
limit_doublets=False, degree_add=None,degree_mult=5,
**kwargs):
"""This is a wrapper for pPXF to fit stellar population models as well as
emission lines to galaxy spectra. Although pPXF allows otherwise, the
    emission lines are kinematically tied to one another, as are the stellar
    models, while the stars and gas vary independently of one another.
Please see the pPXF documentation for more details on the vast array of
parameters and options afforded by the software.
The pPXF software may be downloaded at
http://www-astro.physics.ox.ac.uk/~mxc/software/
Parameters
----------
spec : XSpectrum1D
Spectrum to be fitted
zgal : float
Redshift of galaxy
specresolution : float
Spectral resolution (R) of the data
tie_balmer : bool, optional
Assume intrinsic Balmer decrement. See documentation in ppxf_util.py,
as this has implications for the derived reddening.
limit_doublets : bool, optional
Limit the ratios of [OII] and [SII] lines to ranges allowed by atomic
physics. See documentation in ppxf_util.py, as this has implications
for getting the true flux values from those reported.
degree_add : int, optional
Degree of the additive Legendre polynomial used to modify the template
continuum in the fit.
    degree_mult : int, optional
        Degree of the multiplicative Legendre polynomial used to modify the
        template continuum in the fit (passed to pPXF as `mdegree`).
miles_dir: str, optional
Location of MILES models
Returns
-------
ppfit : ppxf
Object returned by pPXF; attributes are data pertaining to the fit
miles : miles
Contains information about the stellar templates used in the fit.
See the documentation in miles_util.py for full details
weights : 1d numpy vector
Weights of the *stellar* template components. Equivalent to the
first N elements of ppfit.weights where N is the number of stellar
templates used in the fit.
"""
if spec is None:
print('Galaxy has no spectrum associated')
return None
### Spectra must be rebinned to a log scale (in wavelength).
### pPXF provides a routine to do this, but the input spectra
### must be sampled uniformly linear. linetools to the rescue!
if rebin:
meddiff = np.median(spec.wavelength.value[1:] - spec.wavelength.value[0:-1])
newwave = np.arange(spec.wavelength.value[0], spec.wavelength.value[-1], meddiff)
spec = spec.rebin(newwave * units.AA, do_sig=True, grow_bad_sig=True)
# get data and transform wavelength to rest frame for template fitting
wave = spec.wavelength.to(units.Angstrom).value
flux = spec.flux.value
flux = flux * (1. + zgal)
noise = spec.sig.value
noise = noise * (1. + zgal)
wave = wave / (1. + zgal)
# transform to air wavelengths for MILES templates and get approx. FWHM
wave *= np.median(util.vac_to_air(wave) / wave)
# use only wavelength range covered by templates
mask = (wave > 3540) & (wave < 7409)
#mask = (wave > 1682) & (wave < 10000.)
maskidx = np.where(mask)[0]
# also deal with declared good regions of the spectrum
if 'goodpixels' in kwargs:
goodpix = kwargs['goodpixels']
pixmask = np.in1d(maskidx, goodpix)
newgoodpix = np.where(pixmask)[0]
kwargs['goodpixels'] = newgoodpix
wave = wave[mask]
flux = flux[mask]
noise = noise[mask]
    # Non-positive noise values are not allowed; replace them with the maximum
noise[noise <= 0] = np.max(noise)
# pPXF requires the spectra to be log rebinned, so do it
flux, logLam, velscale = util.log_rebin(np.array([wave[0], wave[-1]]), flux)
noise, junk1, junk2 = util.log_rebin(np.array([wave[0], wave[-1]]), noise)
### The following lines unnecessary for DEIMOS/Hecto spectra due to their
### units but rescaling may be necessary for some
# galaxy = flux / np.median(flux) # Normalize spectrum to avoid numerical issues
# print 'Scale flux by', round(np.median(flux),2)
galaxy = flux # use the native units
# pPXF wants the spectral resolution in units of wavelength
FWHM_gal = wave/ specresolution
### Set up stellar templates
#miles_dir = resource_filename('ppxf', '/miles_models/')
#miles_dir = resource_filename('ppxf', '/emiles_padova_chabrier/')
if miles_dir is None:
miles_dir = resource_filename('ppxf', '/miles_padova_chabrier/')
#path4libcall = miles_dir + 'Mun1.30*.fits'
#path4libcall = miles_dir + 'Ech1.30*.fits'
path4libcall = miles_dir + 'Mch1.30*.fits'
miles = lib.miles(path4libcall, velscale, FWHM_gal, wave_gal=wave)
### Stuff for regularization dimensions
reg_dim = miles.templates.shape[1:]
stars_templates = miles.templates.reshape(miles.templates.shape[0], -1)
# See the pPXF documentation for the keyword REGUL
regul_err = 0.01 # Desired regularization error
### Now the emission lines! Only include lines in fit region.
if 'goodpixels' in kwargs:
gal_lam = wave[newgoodpix]
# Also, log rebinning the spectrum change which pixels are 'goodpixels'
newnewgoodpix = np.searchsorted(np.exp(logLam),gal_lam,side='left')
uqnewnewgoodpix = np.unique(newnewgoodpix)
if uqnewnewgoodpix[-1] == len(wave):
            uqnewnewgoodpix = uqnewnewgoodpix[:-1]
kwargs['goodpixels'] = uqnewnewgoodpix
else:
gal_lam = wave
def FWHM_func(wave): # passed to generate emission line templates
return wave / specresolution
gas_templates, gas_names, line_wave = util.emission_lines_mask(
miles.log_lam_temp, gal_lam, FWHM_func,
tie_balmer=tie_balmer, limit_doublets=limit_doublets)
# How many gas components do we have?
balmerlines = [ll for ll in gas_names if ll[0] == 'H']
numbalm = len(balmerlines)
numforbid = len(gas_names) - numbalm
# Stack all templates
templates = np.column_stack([stars_templates, gas_templates])
# other needed quantities
dv = c * (miles.log_lam_temp[0] - logLam[0])
### use the following line if not transforming to z=0 first
# vel = c * np.log(1 + zgal) # eq.(8) of Cappellari (2017)
vel = 0. # We already transformed to the restframe!
start = [vel, 25.] # (km/s), starting guess for [V, sigma]
### Set up combination of templates
n_temps = stars_templates.shape[1]
n_balmer = 1 if tie_balmer else numbalm # Number of Balmer lines included in the fit
n_forbidden = numforbid # Number of other lines included in the fit
# Assign component=0 to the stellar templates, component=1 to the Balmer
# emission lines templates, and component=2 to the forbidden lines.
# component = [0]*n_temps + [1]*n_balmer + [2]*n_forbidden
component = [0] * n_temps + [1] * (n_balmer + n_forbidden) # tie the gas lines together
gas_component = np.array(component) > 0 # gas_component=True for gas templates
# Fit (V, sig, h3, h4) moments=4 for the stars
# and (V, sig) moments=2 for the two gas kinematic components
if len(gas_names) > 0:
moments = [2, 2] # fix the gas kinematic components to one another
start = [[vel, 50.], start] # Adopt different gas/stars starting values
else:
moments = [2] # only stars to be fit
start = [vel, 50.]
# If the Balmer lines are tied one should allow for gas reddening.
# The gas_reddening can be different from the stellar one, if both are fitted.
gas_reddening = 0 if tie_balmer else None
if degree_add is None:
degree_add = -1
t = clock()
ppfit = ppxf.ppxf(templates, galaxy, noise, velscale, start,
plot=False, moments=moments, degree=degree_add, vsyst=dv,
lam=np.exp(logLam), clean=False, regul=1. / regul_err,
reg_dim=reg_dim,component=component, gas_component=gas_component,
gas_names=gas_names, gas_reddening=gas_reddening, mdegree=degree_mult,
**kwargs)
print('Desired Delta Chi^2: %.4g' % np.sqrt(2 * galaxy.size))
print('Current Delta Chi^2: %.4g' % ((ppfit.chi2 - 1) * galaxy.size))
print('Elapsed time in PPXF: %.2f s' % (clock() - t))
weights = ppfit.weights[~gas_component] # Exclude weights of the gas templates
weights = weights.reshape(reg_dim) # Normalized
return ppfit, miles, weights
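# Calling fit_spectrum() directly, outside the run() wrapper, might look like
# the following sketch (the spectrum object and parameter values are
# placeholders; extra keywords such as reddening are forwarded to pPXF):
#
#   ppfit, miles, star_weights = fit_spectrum(spec, zgal=0.1,
#                                             specresolution=2000.,
#                                             degree_add=3, degree_mult=0,
#                                             reddening=1., rebin=True)
#   age, metals = miles.mean_age_metal(star_weights)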
def total_mass(miles, weights, quiet=False):
"""
Computes the total mass of living stars and stellar remnants
in models fitted, given the weights produced and output by pPXF.
A Salpeter IMF is assumed (slope=1.3) initially.
- TODO: Employ Chabrier models
The returned mass excludes the gas lost during stellar evolution.
This procedure uses the mass predictions
from Vazdekis+12 and Ricciardelli+12
http://adsabs.harvard.edu/abs/2012MNRAS.424..157V
http://adsabs.harvard.edu/abs/2012MNRAS.424..172R
they were downloaded in December 2016 below and are included in pPXF with permission
http://www.iac.es/proyecto/miles/pages/photometric-predictions/based-on-miuscat-seds.php
Parameters
----------
miles : miles
Miles object output from fit_spectrum()
weights : 1d numpy array
Weights vector corresponding to stellar templates.
Output from fit_spectrum()
quiet : bool, optional
If True, do not print stellar mass result
Returns
-------
mass_no_gas : float
Total stellar mass of templates fitted.
NOTE: this value will generally need to be scaled if there is any
mismatch in units between data and models. The MILES model spectra
are generally given in units of L_sun/M_sun/Angstrom
"""
from astropy.table import Table
assert miles.age_grid.shape == miles.metal_grid.shape == weights.shape, \
"Input weight dimensions do not match"
#file_dir = path.dirname(path.realpath(__file__)) # path of this procedure
#miles_dir = resource_filename('ppxf', '/miles_models/')
miles_dir = resource_filename('ppxf', '/miles_padova_chabrier/')
#file1 = miles_dir + "/Vazdekis2012_ssp_mass_Padova00_UN_baseFe_v10.0.txt"
file1 = miles_dir + "out_mass_CH_PADOVA00.txt"
colnames = ['IMF','slope','[M/H]','Age','Mtotal','M(*+remn)','M*','Mremn',
'Mgas','M(*+remn)/Lv','M*/Lv','Mv','unknown']
tab = Table.read(file1,format='ascii',names=colnames)
slope1 = tab['slope']
MH1 = tab['[M/H]']
Age1 = tab['Age']
m_no_gas = tab['Mtotal']
# The following loop is a brute force but very safe and general
# way of matching the photometric quantities to the SSP spectra.
# It makes no assumption on the sorting and dimensions of the files
mass_no_gas_grid = np.empty_like(weights)
for j in range(miles.n_ages):
for k in range(miles.n_metal):
p1 = (np.abs(miles.age_grid[j, k] - Age1) < 0.001) & \
(np.abs(miles.metal_grid[j, k] - MH1) < 0.01) & \
(np.abs(1.30 - slope1) < 0.01)
mass_no_gas_grid[j, k] = m_no_gas[p1]
mass_no_gas = np.sum(weights*mass_no_gas_grid)
if not quiet:
print('Total mass: %.4g' % mass_no_gas)
return mass_no_gas
def dump_bestfit(ppfit, outfile=None, z=0.):
"""
Create the bestfit in the observer frame and with vacuum wavelengths
Parameters
----------
ppfit
outfile
Returns
-------
bestfit: XSpectrum1D
"""
meta = dict(airvac='air', headers=[None])
# Spectrum
bestfit = XSpectrum1D.from_tuple((ppfit.lam*(1+z), ppfit.bestfit/(1+z)), meta=meta)
# Convert to vacuum
bestfit.airtovac()
# Write
if outfile is not None:
bestfit.write(outfile)
# Return
return bestfit
def dump_ppxf_results(ppfit, miles, z, outfile):
"""
    Write the standard results and the
gas_component measurements to a simple ASCII file
Parameters
----------
ppfit: ppxf
outfile: str
Returns
-------
"""
# Get the lines (air)
emission_lines, line_names, line_wave = util.emission_lines(
np.array([0.1, 0.2]), [1000., 1e5], 0, limit_doublets=False, vacuum=True)
# Construct a simple Table
gas_tbl = Table()
# Standard pPXF fit results
meta = {}
meta['EBV'] = ppfit.reddening
star_weights = ppfit.weights[~ppfit.gas_component]
star_weights = star_weights.reshape(ppfit.reg_dim)
age, metals = miles.mean_age_metal(star_weights)
meta['AGE'] = age
meta['METALS'] = metals
# Mass -- Approximate
# Mass -- This is a bit approximate as Dwv is a guess for now
actualflux = ppfit.bestfit * constants.L_sun.cgs / units.angstrom / (
4 * np.pi * (cosmo.luminosity_distance(z).to(units.cm)) ** 2 / (1 + z))
# When fitting, the routine thought our data and model spectra had same units...
Dwv = 1700. # Ang, width of the band pass
scfactor = np.median(ppfit.bestfit * (units.erg / units.s / units.cm ** 2 / units.angstrom) / actualflux) * Dwv
# To get the actual model mass required to fit spectrum, scale by this ratio
massmodels = scfactor * miles.total_mass(star_weights)
meta['LOGMSTAR'] = np.log10(massmodels.value)
gas_tbl.meta = meta
gas = ppfit.gas_component
comp = ppfit.component[gas]
gas_tbl['comp'] = comp
gas_tbl['name'] = ppfit.gas_names
gas_tbl['flux'] = ppfit.gas_flux
gas_tbl['err'] = ppfit.gas_flux_error
# Wavelengths
waves = []
for name in ppfit.gas_names:
idx = np.where(line_names == name)[0][0]
waves.append(line_wave[idx])
gas_tbl['wave'] = waves
vs = [ppfit.sol[icomp][0] for icomp in comp]
sigs = [ppfit.sol[icomp][1] for icomp in comp]
gas_tbl['v'] = vs
gas_tbl['sig'] = sigs
# Write
gas_tbl.write(outfile, format='ascii.ecsv', overwrite=True)
print("Wrote: {:s}".format(outfile))
class ppxfFit(object):
def __init__(self,ppfit,miles,weights):
""" Stripped down class of pPXf fit attributes to improve load times
and data management.
Parameters
----------
ppfit : ppxf
pPXF object output from fit_spectrum()
miles : miles
Miles object output from fit_spectrum()
weights : array
Weights on stellar pop models in fit; output from fit_spectrum()
"""
self.galaxy = ppfit.galaxy
self.nspec = ppfit.nspec # nspec=2 for reflection-symmetric LOSVD
self.npix = ppfit.npix # total pixels in the galaxy spectrum
self.noise = ppfit.noise
self.clean = ppfit.clean
self.fraction = ppfit.fraction
self.gas_reddening = ppfit.gas_reddening
self.degree = ppfit.degree
self.mdegree = ppfit.mdegree
self.method = ppfit.method
self.quiet = ppfit.quiet
self.sky = ppfit.sky
self.vsyst = ppfit.vsyst
self.regul = ppfit.regul
self.lam = ppfit.lam
self.nfev = ppfit.nfev
self.reddening = ppfit.reddening
self.reg_dim = ppfit.reg_dim
self.reg_ord = ppfit.reg_ord
self.star = ppfit.star
self.npix_temp = ppfit.npix_temp
self.ntemp = ppfit.ntemp
self.factor = ppfit.factor
self.sigma_diff = ppfit.sigma_diff
        self.status = ppfit.status
self.velscale = ppfit.velscale
self.bestfit = ppfit.bestfit
self.chi2 = ppfit.chi2
self.templates_rfft = ppfit.templates_rfft
self.goodpixels = ppfit.goodpixels
self.moments = ppfit.moments
self.error = ppfit.error
self.factor = ppfit.factor
self.npad = ppfit.npad
self.sol = ppfit.sol
self.apoly = ppfit.apoly
self.mpoly = ppfit.mpoly
self.weights = ppfit.weights
self.weights_ppxf = self.weights
self.gas_component = ppfit.gas_component
self.gas_bestfit = ppfit.gas_bestfit
self.gas_flux = ppfit.gas_flux
self.gas_flux_error = ppfit.gas_flux_error
self.gas_names = ppfit.gas_names
self.gas_mpoly = ppfit.gas_mpoly
self.weights_ppxf = self.weights
### Now for MILES stuff
self.age_grid = miles.age_grid
self.metal_grid = miles.metal_grid
self.n_ages = miles.n_ages
self.n_metal = miles.n_metal
self.mean_log_age, self.mean_metal = miles.mean_age_metal(weights, quiet=True)
self.normfactor = miles.normfactor
|
|
# pylint: disable-msg=W0311,E1101,E1103,W0201,C0103,W0622,W0402,W0706,R0911,W0613,W0612,R0912,W0141,C0111,C0121
# qp_xml: Quick Parsing for XML
#
# Written by Greg Stein. Public Domain.
# No Copyright, no Rights Reserved, and no Warranties.
#
# This module is maintained by Greg and is available as part of the XML-SIG
# distribution. This module and its changelog can be fetched at:
# http://www.lyra.org/cgi-bin/viewcvs.cgi/xml/xml/utils/qp_xml.py
#
# Additional information can be found on Greg's Python page at:
# http://www.lyra.org/greg/python/
#
# This module was added to the XML-SIG distribution on February 14, 2000.
# As part of that distribution, it falls under the XML distribution license.
#
import string
from xml.parsers import expat
error = __name__ + '.error'
#
# The parsing class. Instantiate and pass a string/file to .parse()
#
class Parser:
def __init__(self):
self.reset()
def reset(self):
self.root = None
self.cur_elem = None
def find_prefix(self, prefix):
elem = self.cur_elem
while elem:
if elem.ns_scope.has_key(prefix):
return elem.ns_scope[prefix]
elem = elem.parent
if prefix == '':
return '' # empty URL for "no namespace"
return None
def process_prefix(self, name, use_default):
idx = string.find(name, ':')
if idx == -1:
if use_default:
return self.find_prefix(''), name
return '', name # no namespace
if string.lower(name[:3]) == 'xml':
return '', name # name is reserved by XML. don't break out a NS.
ns = self.find_prefix(name[:idx])
if ns is None:
raise error, 'namespace prefix ("%s") not found' % name[:idx]
return ns, name[idx+1:]
def start(self, name, attrs):
elem = _element(name=name, lang=None, parent=None,
children=[], ns_scope={}, attrs={},
first_cdata='', following_cdata='')
if self.cur_elem:
elem.parent = self.cur_elem
elem.parent.children.append(elem)
self.cur_elem = elem
else:
self.cur_elem = self.root = elem
work_attrs = [ ]
# scan for namespace declarations (and xml:lang while we're at it)
for name, value in attrs.items():
if name == 'xmlns':
elem.ns_scope[''] = value
elif name[:6] == 'xmlns:':
elem.ns_scope[name[6:]] = value
elif name == 'xml:lang':
elem.lang = value
else:
work_attrs.append((name, value))
# inherit xml:lang from parent
if elem.lang is None and elem.parent:
elem.lang = elem.parent.lang
# process prefix of the element name
elem.ns, elem.name = self.process_prefix(elem.name, 1)
# process attributes' namespace prefixes
for name, value in work_attrs:
elem.attrs[self.process_prefix(name, 0)] = value
def end(self, name):
parent = self.cur_elem.parent
del self.cur_elem.ns_scope
del self.cur_elem.parent
self.cur_elem = parent
def cdata(self, data):
elem = self.cur_elem
if elem.children:
last = elem.children[-1]
last.following_cdata = last.following_cdata + data
else:
elem.first_cdata = elem.first_cdata + data
def parse(self, input):
self.reset()
p = expat.ParserCreate()
p.StartElementHandler = self.start
p.EndElementHandler = self.end
p.CharacterDataHandler = self.cdata
try:
if type(input) == type(''):
p.Parse(input, 1)
else:
while 1:
s = input.read(_BLOCKSIZE)
if not s:
p.Parse('', 1)
break
p.Parse(s, 0)
finally:
if self.root:
_clean_tree(self.root)
return self.root
#
# handy function for dumping a tree that is returned by Parser
#
def dump(f, root):
f.write('<?xml version="1.0"?>\n')
namespaces = _collect_ns(root)
_dump_recurse(f, root, namespaces, dump_ns=1)
f.write('\n')
#
# This function returns the element's CDATA. Note: this is not recursive --
# it only returns the CDATA immediately within the element, excluding the
# CDATA in child elements.
#
def textof(elem):
return elem.textof()
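#
# Illustrative use of the parser above (the example document is hypothetical):
#
#   p = Parser()
#   root = p.parse('<doc xmlns:a="urn:x"><a:item>hi</a:item></doc>')
#   # root.name == 'doc'
#   # root.children[0].ns == 'urn:x' and root.children[0].name == 'item'
#   # textof(root.children[0]) == 'hi'
#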
#########################################################################
#
# private stuff for qp_xml
#
_BLOCKSIZE = 16384 # chunk size for parsing input
class _element:
def __init__(self, **kw):
self.__dict__.update(kw)
def textof(self):
'''Return the CDATA of this element.
Note: this is not recursive -- it only returns the CDATA immediately
within the element, excluding the CDATA in child elements.
'''
s = self.first_cdata
for child in self.children:
s = s + child.following_cdata
return s
def find(self, name, ns=''):
for elem in self.children:
if elem.name == name and elem.ns == ns:
return elem
return None
def _clean_tree(elem):
elem.parent = None
del elem.parent
map(_clean_tree, elem.children)
def _collect_recurse(elem, dict):
dict[elem.ns] = None
for ns, name in elem.attrs.keys():
dict[ns] = None
for child in elem.children:
_collect_recurse(child, dict)
def _collect_ns(elem):
"Collect all namespaces into a NAMESPACE -> PREFIX mapping."
d = { '' : None }
_collect_recurse(elem, d)
del d[''] # make sure we don't pick up no-namespace entries
keys = d.keys()
for i in range(len(keys)):
d[keys[i]] = i
return d
def _dump_recurse(f, elem, namespaces, lang=None, dump_ns=0):
if elem.ns:
f.write('<ns%d:%s' % (namespaces[elem.ns], elem.name))
else:
f.write('<' + elem.name)
for (ns, name), value in elem.attrs.items():
if ns:
f.write(' ns%d:%s="%s"' % (namespaces[ns], name, value))
else:
f.write(' %s="%s"' % (name, value))
if dump_ns:
for ns, id in namespaces.items():
f.write(' xmlns:ns%d="%s"' % (id, ns))
if elem.lang != lang:
f.write(' xml:lang="%s"' % elem.lang)
if elem.children or elem.first_cdata:
f.write('>' + elem.first_cdata)
for child in elem.children:
_dump_recurse(f, child, namespaces, elem.lang)
f.write(child.following_cdata)
if elem.ns:
f.write('</ns%d:%s>' % (namespaces[elem.ns], elem.name))
else:
f.write('</%s>' % elem.name)
else:
f.write('/>')
|
|
# Copyright 2018 the pycolab Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Position yourself to catch blocks based on a visual cue.
This game proceeds in two phases. The first phase is a "programming phase",
where the player sees each of the four visual cues (green blocks at the bottom
of the game board) paired randomly with either of two additional visual cues
(larger green blocks just above the cues, called "ball symbols"). These pairings
tell the player what actions they should take in the second phase of the game.
In the second phase of the game, the player must repeatedly move itself up or
down to position itself in front of either of two blocks: a yellow block or a
cyan block. These blocks approach the player from right to left. If the player
"catches" the correct block, it receives a point. The correct block is indicted
by the visual cue shown as the blocks begin to approach the player. If the cue
was paired with the left "ball symbol" during the programming phase, the player
should catch the yellow block; otherwise it should catch the cyan block.
Each episode of "Cued Catch" starts with a different mapping from cues to
blocks. The player must learn to remember these associations in order to play
the game successfully.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import curses
import random
import sys
from pycolab import ascii_art
from pycolab import human_ui
from pycolab import things as plab_things
from pycolab.prefab_parts import sprites as prefab_sprites
# ASCII art for the game board. Not too representative: there is usually some
# cue showing on some part of the board.
GAME_ART = [
' ',
' P a ',
' b ',
' ',
' ',
' ',
' ',
]
if __name__ == '__main__': # Avoid defining flags when used as a library.
parser = argparse.ArgumentParser(
description='Play Cued Catch.',
epilog=(
'NOTE: Default options configure the game as the agents in the paper '
'played it. These settings may not be much fun for humans, though.'))
parser.add_argument('--initial_cue_duration', metavar='t', type=int,
default=10, help='Programming cue duration.')
parser.add_argument('--cue_duration', metavar='t', type=int, default=10,
help='Query cue duration.')
parser.add_argument('--num_trials', metavar='K', type=int, default=100,
help='Number of trials per episode.')
# This flag is for establishing a control that requires no long term memory.
parser.add_argument('--always_show_ball_symbol', action='store_true',
help='Control case: show ball symbols during trials.')
# This flag is for experiments that require noise-tolerant memory.
parser.add_argument('--reward_sigma', metavar='s', type=float, default=0.0,
help='Stddev for noise to add to ball-catch rewards.')
# This flag is for experiments that require very long term memory.
parser.add_argument('--reward_free_trials', metavar='K', type=int, default=40,
help='Provide no reward for the first K trials')
FLAGS = parser.parse_args()
# These colours are only for humans to see in the CursesUi.
COLOURS = {' ': (0, 0, 0), # Black background
'P': (999, 999, 999), # This is you, the player
'Q': (0, 999, 0), # Cue blocks
'a': (999, 999, 0), # Top ball
'b': (0, 999, 999)} # Bottom ball
def make_game(initial_cue_duration, cue_duration, num_trials,
always_show_ball_symbol=False,
reward_sigma=0.0,
reward_free_trials=0):
return ascii_art.ascii_art_to_game(
art=GAME_ART,
what_lies_beneath=' ',
sprites={'P': ascii_art.Partial(
PlayerSprite,
reward_sigma=reward_sigma,
reward_free_trials=reward_free_trials),
'a': BallSprite,
'b': BallSprite},
drapes={'Q': ascii_art.Partial(
CueDrape,
initial_cue_duration, cue_duration, num_trials,
always_show_ball_symbol)},
update_schedule=['P', 'a', 'b', 'Q'])
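# A small sketch (not part of the original file) of stepping the game without
# the curses UI, via the standard pycolab engine interface:
#
#   game = make_game(initial_cue_duration=10, cue_duration=10, num_trials=5)
#   observation, reward, discount = game.its_showtime()
#   while not game.game_over:
#     observation, reward, discount = game.play(3)   # action 3 == do nothing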
class PlayerSprite(prefab_sprites.MazeWalker):
"""A `Sprite` for our player, the catcher."""
def __init__(self, corner, position, character,
reward_sigma=0.0, reward_free_trials=0):
"""Initialise a PlayerSprite.
Args:
corner: standard `Sprite` constructor parameter.
position: standard `Sprite` constructor parameter.
character: standard `Sprite` constructor parameter.
reward_sigma: standard deviation of reward for catching the ball (or
not). A value of 0.0 means rewards with no noise.
reward_free_trials: number of trials before any reward can be earned.
"""
super(PlayerSprite, self).__init__(
corner, position, character, impassable='', confined_to_board=True)
self._reward_sigma = reward_sigma
self._trials_till_reward = reward_free_trials
def update(self, actions, board, layers, backdrop, things, the_plot):
# Our motions are quite constrained: we can only move up or down one spot.
if actions == 1 and self.virtual_position.row > 1: # go up?
self._north(board, the_plot)
elif actions == 2 and self.virtual_position.row < 2: # go down?
self._south(board, the_plot)
elif actions in [0, 4]: # quit the game?
the_plot.terminate_episode()
else: # do nothing?
self._stay(board, the_plot) # (or can't move?)
# Give ourselves a point if we landed on the correct ball.
correct_ball = 'a' if the_plot.get('which_ball') == 'top' else 'b'
if self._reward_sigma:
if (self.position.col == things[correct_ball].position.col and
self._trials_till_reward <= 0):
the_plot.add_reward(
float(self.position == things[correct_ball].position) +
random.normalvariate(mu=0, sigma=self._reward_sigma))
else:
the_plot.add_reward(0)
else:
the_plot.add_reward(int(
self.position == things[correct_ball].position and
self._trials_till_reward <= 0
))
# Decrement trials left till reward.
if (self.position.col == things[correct_ball].position.col and
self._trials_till_reward > 0):
self._trials_till_reward -= 1
class BallSprite(plab_things.Sprite):
"""A `Sprite` for the balls approaching the player."""
def __init__(self, corner, position, character):
"""Mark ourselves as invisible at first."""
super(BallSprite, self).__init__(corner, position, character)
# Save start position.
self._start_position = position
# But mark ourselves invisible for now.
self._visible = False
def update(self, actions, board, layers, backdrop, things, the_plot):
# Wait patiently until the initial programming cues have been shown.
if not the_plot.get('programming_complete'): return
# Cues are shown; we are visible now.
self._visible = True
# If we're to the left of the player, reposition ourselves back at the start
# position and tell the cue drape to pick a new correct ball.
if self.position.col < things['P'].position.col:
self._position = self._start_position
the_plot['last_ball_reset'] = the_plot.frame
else:
self._position = self.Position(self.position.row, self.position.col - 1)
class CueDrape(plab_things.Drape):
""""Programs" the player, then chooses correct balls and shows cues.
The cue drape goes through two phases.
In the first phase, it presents each of the four cues serially along with a
symbol that indicates whether the top ball or the bottom ball is the correct
choice for that cue. (The symbol does not resemble one of the balls.) During
this phase, no balls appear. Agent actions can move the player but accomplish
nothing else. Each associational cue presentation lasts for a number of
timesteps controlled by the `initial_cue_duration` constructor argument.
Once all four cues have been shown in this way, the second phase presents a
sequence of `num_trials` fixed-length trials. In each trial, one of the four
cues is shown for `cue_duration` timesteps, and the two balls advance toward
the player from the right-hand side of the screen. The agent must position the
player to "catch" the ball that matches the cue shown at the beginning of the
trial.
The two phases can also be visually distinguished by the presence of some
additional markers on the board.
"""
_NUM_CUES = 4 # Must divide 12 evenly and be divisible by 2. So, 2, 4, 6, 12.
def __init__(self, curtain, character,
initial_cue_duration,
cue_duration,
num_trials,
always_show_ball_symbol):
super(CueDrape, self).__init__(curtain, character)
self._initial_cue_duration = initial_cue_duration
self._cue_duration = cue_duration
self._num_trials_left = num_trials
self._always_show_ball_symbol = always_show_ball_symbol
# Assign balls to each of the cues.
self._cues_to_balls = random.sample(
['top'] * (self._NUM_CUES // 2) + ['bottom'] * (self._NUM_CUES // 2),
self._NUM_CUES)
self._phase = 'first'
# State for first phase.
self._first_phase_tick = self._NUM_CUES * self._initial_cue_duration
# State for second phase, initialised to bogus values.
self._second_phase_cue_choice = -1
self._second_phase_tick = -1
self._second_phase_last_reset = -float('inf')
def update(self, actions, board, layers, backdrop, things, the_plot):
# Show the agent which phase we're in.
self._show_phase_cue(self._phase)
# Do phase-specific update.
if self._phase == 'first':
self._do_first_phase(the_plot)
elif self._phase == 'second':
self._do_second_phase(the_plot)
## Phase-specific updates.
def _do_first_phase(self, the_plot):
# Iterate through showing each of the cues.
self._first_phase_tick -= 1 # Decrement number of steps left in this phase.
cue = self._first_phase_tick // self._initial_cue_duration
self._show_ball_symbol(self._cues_to_balls[cue])
self._show_cue(cue)
# End of phase? Move on to the next phase.
if self._first_phase_tick <= 0:
self._phase = 'second'
the_plot['programming_complete'] = True
self._second_phase_reset(the_plot)
def _do_second_phase(self, the_plot):
self._show_ball_symbol('neither') # Clear ball symbol.
# Reset ourselves if the balls have moved beyond the player.
if the_plot.get('last_ball_reset') > self._second_phase_last_reset:
self._second_phase_reset(the_plot)
# Show the cue if it's still visible in this trial.
if self._second_phase_tick > 0:
self._show_cue(self._second_phase_cue_choice)
if self._always_show_ball_symbol: self._show_ball_symbol(
self._cues_to_balls[self._second_phase_cue_choice])
else:
self._show_cue(None)
self._show_ball_symbol(None)
# Countdown second phase clock.
self._second_phase_tick -= 1
def _second_phase_reset(self, the_plot):
self._second_phase_cue_choice = random.randrange(self._NUM_CUES)
the_plot['which_ball'] = self._cues_to_balls[self._second_phase_cue_choice]
self._second_phase_tick = self._cue_duration
self._second_phase_last_reset = the_plot.frame
# Terminate if we've run out of trials.
if self._num_trials_left <= 0: the_plot.terminate_episode()
self._num_trials_left -= 1
## Display helpers
def _show_phase_cue(self, phase):
self.curtain[1:3, :] = False
if phase == 'first':
self.curtain[1:3, 0:2] = True
self.curtain[1:3, -2:] = True
# No cue for the second phase.
def _show_ball_symbol(self, ball):
self.curtain[3:5, :] = False
if ball == 'top':
self.curtain[3:5, 0:6] = True
elif ball == 'bottom':
self.curtain[3:5, -6:] = True
def _show_cue(self, cue=None):
self.curtain[-2:, :] = False
    if cue is not None and 0 <= cue < self._NUM_CUES:
width = self.curtain.shape[1] // self._NUM_CUES
l = cue * width
r = l + width
self.curtain[-2:, l:r] = True
def main(argv):
del argv # Unused.
# Build a cued_catch game.
game = make_game(FLAGS.initial_cue_duration,
FLAGS.cue_duration, FLAGS.num_trials,
FLAGS.always_show_ball_symbol,
FLAGS.reward_sigma,
FLAGS.reward_free_trials)
# Make a CursesUi to play it with.
ui = human_ui.CursesUi(
keys_to_actions={curses.KEY_UP: 1, curses.KEY_DOWN: 2,
-1: 3,
'q': 4, 'Q': 4},
delay=200, colour_fg=COLOURS)
# Let the game begin!
ui.play(game)
if __name__ == '__main__':
main(sys.argv)
|
|
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Algorithm Protocol
===================
For a class to be passed as a trading algorithm to the
:py:class:`zipline.lines.SimulatedTrading` zipline it must follow an
implementation protocol. Examples of this algorithm protocol are provided
below.
The algorithm must expose methods:
- initialize: method that takes no args, no returns. Simply called to
enable the algorithm to set any internal state needed.
- get_sid_filter: method that takes no args, and returns a list of valid
sids. List must have a length between 1 and 10. If None is returned the
filter will block all events.
- handle_data: method that accepts a :py:class:`zipline.protocol.BarData`
of the current state of the simulation universe. An example data object:
.. This outputs the table as an HTML table but for some reason there
   is no bounding box. Make the previous paragraph ending colon a
   double-colon to turn this back into a blockquoted table in ASCII art.
+-----------------+--------------+----------------+-------------------+
| | sid(133) | sid(134) | sid(135) |
+=================+==============+================+===================+
| price | $10.10 | $22.50 | $13.37 |
+-----------------+--------------+----------------+-------------------+
| volume | 10,000 | 5,000 | 50,000 |
+-----------------+--------------+----------------+-------------------+
| mvg_avg_30 | $9.97 | $22.61 | $13.37 |
+-----------------+--------------+----------------+-------------------+
| dt | 6/30/2012 | 6/30/2011 | 6/29/2012 |
+-----------------+--------------+----------------+-------------------+
- set_order: method that accepts a callable. Will be set as the value of the
order method of trading_client. An algorithm can then place orders with a
valid sid and a number of shares::
self.order(sid(133), share_count)
- set_performance: property which can be set equal to the
cumulative_trading_performance property of the trading_client. An
algorithm can then check position information with the
Portfolio object::
self.Portfolio[sid(133)]['cost_basis']
- set_transact_setter: method that accepts a callable. Will
be set as the value of the set_transact_setter method of
the trading_client. This allows an algorithm to change the
slippage model used to predict transactions based on orders
and trade events.
"""
from copy import deepcopy
import numpy as np
from nose.tools import assert_raises
from six.moves import range
from six import itervalues
from zipline.algorithm import TradingAlgorithm
from zipline.api import (
FixedSlippage,
order,
set_slippage,
record,
sid,
)
from zipline.errors import UnsupportedOrderParameters
from zipline.assets import Future, Equity
from zipline.finance.execution import (
LimitOrder,
MarketOrder,
StopLimitOrder,
StopOrder,
)
from zipline.finance.controls import AssetDateBounds
from zipline.transforms import BatchTransform, batch_transform
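# A minimal sketch of the algorithm protocol described in the module docstring:
# initialize() sets internal state and handle_data() reacts to each BarData
# event. The sid used here (133) is purely illustrative.
class ProtocolExampleAlgorithm(TradingAlgorithm):
    """Buys one share of a single asset on every bar (illustrative only)."""

    def initialize(self):
        self.asset = self.sid(133)  # hypothetical sid, mirroring the docstring

    def handle_data(self, data):
        # Place an order for one share each time a new bar arrives.
        self.order(self.asset, 1)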
class TestAlgorithm(TradingAlgorithm):
"""
This algorithm will send a specified number of orders, to allow unit tests
to verify the orders sent/received, transactions created, and positions
at the close of a simulation.
"""
def initialize(self,
sid,
amount,
order_count,
sid_filter=None,
slippage=None,
commission=None):
self.count = order_count
self.asset = self.sid(sid)
self.amount = amount
self.incr = 0
if sid_filter:
self.sid_filter = sid_filter
else:
self.sid_filter = [self.asset.sid]
if slippage is not None:
self.set_slippage(slippage)
if commission is not None:
self.set_commission(commission)
def handle_data(self, data):
# place an order for amount shares of sid
if self.incr < self.count:
self.order(self.asset, self.amount)
self.incr += 1
class HeavyBuyAlgorithm(TradingAlgorithm):
"""
This algorithm will send a specified number of orders, to allow unit tests
to verify the orders sent/received, transactions created, and positions
at the close of a simulation.
"""
def initialize(self, sid, amount):
self.asset = self.sid(sid)
self.amount = amount
self.incr = 0
def handle_data(self, data):
# place an order for 100 shares of sid
self.order(self.asset, self.amount)
self.incr += 1
class NoopAlgorithm(TradingAlgorithm):
"""
    Dolce far niente -- sweet idleness: this algorithm does nothing.
"""
def get_sid_filter(self):
return []
def initialize(self):
pass
def set_transact_setter(self, txn_sim_callable):
pass
def handle_data(self, data):
pass
class ExceptionAlgorithm(TradingAlgorithm):
"""
Throw an exception from the method name specified in the
constructor.
"""
def initialize(self, throw_from, sid):
self.throw_from = throw_from
self.asset = self.sid(sid)
if self.throw_from == "initialize":
raise Exception("Algo exception in initialize")
else:
pass
def set_portfolio(self, portfolio):
if self.throw_from == "set_portfolio":
raise Exception("Algo exception in set_portfolio")
else:
pass
def handle_data(self, data):
if self.throw_from == "handle_data":
raise Exception("Algo exception in handle_data")
else:
pass
def get_sid_filter(self):
if self.throw_from == "get_sid_filter":
raise Exception("Algo exception in get_sid_filter")
else:
return [self.asset]
def set_transact_setter(self, txn_sim_callable):
pass
class DivByZeroAlgorithm(TradingAlgorithm):
def initialize(self, sid):
self.asset = self.sid(sid)
self.incr = 0
def handle_data(self, data):
self.incr += 1
if self.incr > 4:
5 / 0
pass
class TooMuchProcessingAlgorithm(TradingAlgorithm):
def initialize(self, sid):
self.asset = self.sid(sid)
def handle_data(self, data):
# Unless we're running on some sort of
# supercomputer this will hit timeout.
for i in range(1000000000):
self.foo = i
class TimeoutAlgorithm(TradingAlgorithm):
def initialize(self, sid):
self.asset = self.sid(sid)
self.incr = 0
def handle_data(self, data):
if self.incr > 4:
import time
time.sleep(100)
pass
class RecordAlgorithm(TradingAlgorithm):
def initialize(self):
self.incr = 0
def handle_data(self, data):
self.incr += 1
self.record(incr=self.incr)
name = 'name'
self.record(name, self.incr)
record(name, self.incr, 'name2', 2, name3=self.incr)
class TestOrderAlgorithm(TradingAlgorithm):
def initialize(self):
self.incr = 0
def handle_data(self, data):
if self.incr == 0:
assert 0 not in self.portfolio.positions
else:
assert self.portfolio.positions[0]['amount'] == \
self.incr, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.incr += 1
self.order(self.sid(0), 1)
class TestOrderInstantAlgorithm(TradingAlgorithm):
def initialize(self):
self.incr = 0
self.last_price = None
def handle_data(self, data):
if self.incr == 0:
assert 0 not in self.portfolio.positions
else:
assert self.portfolio.positions[0]['amount'] == \
self.incr, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
self.last_price, "Orders was not filled at last price."
self.incr += 2
self.order_value(self.sid(0), data[0].price * 2.)
self.last_price = data[0].price
class TestOrderStyleForwardingAlgorithm(TradingAlgorithm):
"""
Test Algorithm for verifying that ExecutionStyles are properly forwarded by
order API helper methods. Pass the name of the method to be tested as a
string parameter to this algorithm's constructor.
"""
def __init__(self, *args, **kwargs):
self.method_name = kwargs.pop('method_name')
super(TestOrderStyleForwardingAlgorithm, self)\
.__init__(*args, **kwargs)
def initialize(self):
self.incr = 0
self.last_price = None
def handle_data(self, data):
if self.incr == 0:
assert len(self.portfolio.positions.keys()) == 0
method_to_check = getattr(self, self.method_name)
method_to_check(self.sid(0),
data[0].price,
style=StopLimitOrder(10, 10))
assert len(self.blotter.open_orders[0]) == 1
result = self.blotter.open_orders[0][0]
assert result.limit == 10
assert result.stop == 10
self.incr += 1
class TestOrderValueAlgorithm(TradingAlgorithm):
def initialize(self):
self.incr = 0
self.sale_price = None
def handle_data(self, data):
if self.incr == 0:
assert 0 not in self.portfolio.positions
else:
assert self.portfolio.positions[0]['amount'] == \
self.incr, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.incr += 2
multiplier = 2.
if isinstance(self.sid(0), Future):
multiplier *= self.sid(0).contract_multiplier
self.order_value(self.sid(0), data[0].price * multiplier)
class TestTargetAlgorithm(TradingAlgorithm):
def initialize(self):
self.target_shares = 0
self.sale_price = None
def handle_data(self, data):
if self.target_shares == 0:
assert 0 not in self.portfolio.positions
else:
assert self.portfolio.positions[0]['amount'] == \
self.target_shares, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.target_shares = np.random.randint(1, 30)
self.order_target(self.sid(0), self.target_shares)
class TestOrderPercentAlgorithm(TradingAlgorithm):
def initialize(self):
self.target_shares = 0
self.sale_price = None
def handle_data(self, data):
if self.target_shares == 0:
assert 0 not in self.portfolio.positions
self.order(self.sid(0), 10)
self.target_shares = 10
return
else:
assert self.portfolio.positions[0]['amount'] == \
self.target_shares, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.order_percent(self.sid(0), .001)
if isinstance(self.sid(0), Equity):
self.target_shares += np.floor(
(.001 * self.portfolio.portfolio_value) / data[0].price
)
if isinstance(self.sid(0), Future):
self.target_shares += np.floor(
(.001 * self.portfolio.portfolio_value) /
(data[0].price * self.sid(0).contract_multiplier)
)
class TestTargetPercentAlgorithm(TradingAlgorithm):
def initialize(self):
self.target_shares = 0
self.sale_price = None
def handle_data(self, data):
if self.target_shares == 0:
assert 0 not in self.portfolio.positions
self.target_shares = 1
else:
assert np.round(self.portfolio.portfolio_value * 0.002) == \
self.portfolio.positions[0]['amount'] * self.sale_price, \
"Orders not filled correctly."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.sale_price = data[0].price
self.order_target_percent(self.sid(0), .002)
class TestTargetValueAlgorithm(TradingAlgorithm):
def initialize(self):
self.target_shares = 0
self.sale_price = None
def handle_data(self, data):
if self.target_shares == 0:
assert 0 not in self.portfolio.positions
self.order(self.sid(0), 10)
self.target_shares = 10
return
else:
print(self.portfolio)
assert self.portfolio.positions[0]['amount'] == \
self.target_shares, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.order_target_value(self.sid(0), 20)
self.target_shares = np.round(20 / data[0].price)
if isinstance(self.sid(0), Equity):
self.target_shares = np.round(20 / data[0].price)
if isinstance(self.sid(0), Future):
self.target_shares = np.round(
20 / (data[0].price * self.sid(0).contract_multiplier))
############################
# AccountControl Test Algos#
############################
class SetMaxLeverageAlgorithm(TradingAlgorithm):
def initialize(self, max_leverage=None):
self.set_max_leverage(max_leverage=max_leverage)
############################
# TradingControl Test Algos#
############################
class SetMaxPositionSizeAlgorithm(TradingAlgorithm):
def initialize(self, sid=None, max_shares=None, max_notional=None):
self.order_count = 0
self.set_max_position_size(sid=sid,
max_shares=max_shares,
max_notional=max_notional)
class SetMaxOrderSizeAlgorithm(TradingAlgorithm):
def initialize(self, sid=None, max_shares=None, max_notional=None):
self.order_count = 0
self.set_max_order_size(sid=sid,
max_shares=max_shares,
max_notional=max_notional)
class SetDoNotOrderListAlgorithm(TradingAlgorithm):
def initialize(self, sid=None, restricted_list=None):
self.order_count = 0
self.set_do_not_order_list(restricted_list)
class SetMaxOrderCountAlgorithm(TradingAlgorithm):
def initialize(self, count):
self.order_count = 0
self.set_max_order_count(count)
class SetLongOnlyAlgorithm(TradingAlgorithm):
def initialize(self):
self.order_count = 0
self.set_long_only()
class SetAssetDateBoundsAlgorithm(TradingAlgorithm):
"""
Algorithm that tries to order 1 share of sid 0 on every bar and has an
AssetDateBounds() trading control in place.
"""
def initialize(self):
self.register_trading_control(AssetDateBounds())
def handle_data(algo, data):
algo.order(algo.sid(0), 1)
class TestRegisterTransformAlgorithm(TradingAlgorithm):
def initialize(self, *args, **kwargs):
self.set_slippage(FixedSlippage())
def handle_data(self, data):
pass
class AmbitiousStopLimitAlgorithm(TradingAlgorithm):
"""
Algorithm that tries to buy with extremely low stops/limits and tries to
sell with extremely high versions of same. Should not end up with any
positions for reasonable data.
"""
def initialize(self, *args, **kwargs):
self.asset = self.sid(kwargs.pop('sid'))
def handle_data(self, data):
########
# Buys #
########
# Buy with low limit, shouldn't trigger.
self.order(self.asset, 100, limit_price=1)
# Buy with high stop, shouldn't trigger.
self.order(self.asset, 100, stop_price=10000000)
# Buy with high limit (should trigger) but also high stop (should
# prevent trigger).
self.order(self.asset, 100, limit_price=10000000, stop_price=10000000)
# Buy with low stop (should trigger), but also low limit (should
# prevent trigger).
self.order(self.asset, 100, limit_price=1, stop_price=1)
#########
# Sells #
#########
# Sell with high limit, shouldn't trigger.
self.order(self.asset, -100, limit_price=1000000)
# Sell with low stop, shouldn't trigger.
self.order(self.asset, -100, stop_price=1)
# Sell with low limit (should trigger), but also high stop (should
# prevent trigger).
self.order(self.asset, -100, limit_price=1000000, stop_price=1000000)
# Sell with low limit (should trigger), but also low stop (should
# prevent trigger).
self.order(self.asset, -100, limit_price=1, stop_price=1)
###################
# Rounding Checks #
###################
self.order(self.asset, 100, limit_price=.00000001)
self.order(self.asset, -100, stop_price=.00000001)
##########################################
# Algorithm using simple batch transforms
class ReturnPriceBatchTransform(BatchTransform):
def get_value(self, data):
assert data.shape[1] == self.window_length, \
"data shape={0} does not equal window_length={1} for data={2}".\
format(data.shape[1], self.window_length, data)
return data.price
@batch_transform
def return_price_batch_decorator(data):
return data.price
@batch_transform
def return_args_batch_decorator(data, *args, **kwargs):
return args, kwargs
@batch_transform
def return_data(data, *args, **kwargs):
return data
@batch_transform
def uses_ufunc(data, *args, **kwargs):
# ufuncs like np.log should not crash
return np.log(data)
@batch_transform
def price_multiple(data, multiplier, extra_arg=1):
return data.price * multiplier * extra_arg
class BatchTransformAlgorithm(TradingAlgorithm):
def initialize(self, *args, **kwargs):
self.refresh_period = kwargs.pop('refresh_period', 1)
self.window_length = kwargs.pop('window_length', 3)
self.args = args
self.kwargs = kwargs
self.history_return_price_class = []
self.history_return_price_decorator = []
self.history_return_args = []
self.history_return_arbitrary_fields = []
self.history_return_nan = []
self.history_return_sid_filter = []
self.history_return_field_filter = []
self.history_return_field_no_filter = []
self.history_return_ticks = []
self.history_return_not_full = []
self.return_price_class = ReturnPriceBatchTransform(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False
)
self.return_price_decorator = return_price_batch_decorator(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False
)
self.return_args_batch = return_args_batch_decorator(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False
)
self.return_arbitrary_fields = return_data(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False
)
self.return_nan = return_price_batch_decorator(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=True
)
self.return_sid_filter = return_price_batch_decorator(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=True,
sids=[0]
)
self.return_field_filter = return_data(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=True,
fields=['price']
)
self.return_field_no_filter = return_data(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=True
)
self.return_not_full = return_data(
refresh_period=1,
window_length=self.window_length,
compute_only_full=False
)
self.uses_ufunc = uses_ufunc(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False
)
self.price_multiple = price_multiple(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False
)
self.iter = 0
self.set_slippage(FixedSlippage())
def handle_data(self, data):
self.history_return_price_class.append(
self.return_price_class.handle_data(data))
self.history_return_price_decorator.append(
self.return_price_decorator.handle_data(data))
self.history_return_args.append(
self.return_args_batch.handle_data(
data, *self.args, **self.kwargs))
self.history_return_not_full.append(
self.return_not_full.handle_data(data))
self.uses_ufunc.handle_data(data)
# check that calling transforms with the same arguments
# is idempotent
self.price_multiple.handle_data(data, 1, extra_arg=1)
if self.price_multiple.full:
pre = self.price_multiple.rolling_panel.get_current().shape[0]
result1 = self.price_multiple.handle_data(data, 1, extra_arg=1)
post = self.price_multiple.rolling_panel.get_current().shape[0]
assert pre == post, "batch transform is appending redundant events"
result2 = self.price_multiple.handle_data(data, 1, extra_arg=1)
assert result1 is result2, "batch transform is not idempotent"
# check that calling transform with the same data, but
# different supplemental arguments results in new
# results.
result3 = self.price_multiple.handle_data(data, 2, extra_arg=1)
assert result1 is not result3, \
"batch transform is not updating for new args"
result4 = self.price_multiple.handle_data(data, 1, extra_arg=2)
assert result1 is not result4,\
"batch transform is not updating for new kwargs"
new_data = deepcopy(data)
for sidint in new_data:
new_data[sidint]['arbitrary'] = 123
self.history_return_arbitrary_fields.append(
self.return_arbitrary_fields.handle_data(new_data))
# nan every second event price
if self.iter % 2 == 0:
self.history_return_nan.append(
self.return_nan.handle_data(data))
else:
nan_data = deepcopy(data)
nan_data.price = np.nan
self.history_return_nan.append(
self.return_nan.handle_data(nan_data))
self.iter += 1
# Add a new sid to check that it does not get included
extra_sid_data = deepcopy(data)
extra_sid_data[1] = extra_sid_data[0]
self.history_return_sid_filter.append(
self.return_sid_filter.handle_data(extra_sid_data)
)
# Add a field to check that it does not get included
extra_field_data = deepcopy(data)
extra_field_data[0]['ignore'] = extra_sid_data[0]['price']
self.history_return_field_filter.append(
self.return_field_filter.handle_data(extra_field_data)
)
self.history_return_field_no_filter.append(
self.return_field_no_filter.handle_data(extra_field_data)
)
class BatchTransformAlgorithmMinute(TradingAlgorithm):
def initialize(self, *args, **kwargs):
self.refresh_period = kwargs.pop('refresh_period', 1)
self.window_length = kwargs.pop('window_length', 3)
self.args = args
self.kwargs = kwargs
self.history = []
self.batch_transform = return_price_batch_decorator(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False,
bars='minute'
)
def handle_data(self, data):
self.history.append(self.batch_transform.handle_data(data))
class SetPortfolioAlgorithm(TradingAlgorithm):
"""
An algorithm that tries to set the portfolio directly.
The portfolio should be treated as a read-only object
within the algorithm.
"""
def initialize(self, *args, **kwargs):
pass
def handle_data(self, data):
self.portfolio = 3
class TALIBAlgorithm(TradingAlgorithm):
"""
An algorithm that applies a TA-Lib transform. The transform object can be
passed at initialization with the 'talib' keyword argument. The results are
stored in the talib_results dictionary, keyed by transform.
"""
def initialize(self, *args, **kwargs):
if 'talib' not in kwargs:
raise KeyError('No TA-LIB transform specified '
'(use keyword \'talib\').')
elif not isinstance(kwargs['talib'], (list, tuple)):
self.talib_transforms = (kwargs['talib'],)
else:
self.talib_transforms = kwargs['talib']
self.talib_results = dict((t, []) for t in self.talib_transforms)
def handle_data(self, data):
for t in self.talib_transforms:
result = t.handle_data(data)
if result is None:
if len(t.talib_fn.output_names) == 1:
result = np.nan
else:
result = (np.nan,) * len(t.talib_fn.output_names)
self.talib_results[t].append(result)
class EmptyPositionsAlgorithm(TradingAlgorithm):
"""
An algorithm that ensures that 'phantom' positions do not appear
in portfolio.positions in the case that a position has been entered
and fully exited.
"""
def initialize(self, *args, **kwargs):
self.ordered = False
self.exited = False
def handle_data(self, data):
if not self.ordered:
for s in data:
self.order(self.sid(s), 100)
self.ordered = True
if not self.exited:
amounts = [pos.amount for pos
in itervalues(self.portfolio.positions)]
if (
all([(amount == 100) for amount in amounts]) and
(len(amounts) == len(data.keys()))
):
for stock in self.portfolio.positions:
self.order(self.sid(stock), -100)
self.exited = True
# Should be 0 when all positions are exited.
self.record(num_positions=len(self.portfolio.positions))
class InvalidOrderAlgorithm(TradingAlgorithm):
"""
An algorithm that tries to make various invalid order calls, verifying that
appropriate exceptions are raised.
"""
def initialize(self, *args, **kwargs):
self.asset = self.sid(kwargs.pop('sids')[0])
def handle_data(self, data):
from zipline.api import (
order_percent,
order_target,
order_target_percent,
order_target_value,
order_value,
)
for style in [MarketOrder(), LimitOrder(10),
StopOrder(10), StopLimitOrder(10, 10)]:
with assert_raises(UnsupportedOrderParameters):
order(self.asset, 10, limit_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order(self.asset, 10, stop_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_value(self.asset, 300, limit_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_value(self.asset, 300, stop_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_percent(self.asset, .1, limit_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_percent(self.asset, .1, stop_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_target(self.asset, 100, limit_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_target(self.asset, 100, stop_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_target_value(self.asset, 100,
limit_price=10,
style=style)
with assert_raises(UnsupportedOrderParameters):
order_target_value(self.asset, 100,
stop_price=10,
style=style)
with assert_raises(UnsupportedOrderParameters):
order_target_percent(self.asset, .2,
limit_price=10,
style=style)
with assert_raises(UnsupportedOrderParameters):
order_target_percent(self.asset, .2,
stop_price=10,
style=style)
##############################
# Quantopian style algorithms
# Noop algo
def initialize_noop(context):
pass
def handle_data_noop(context, data):
pass
# API functions
def initialize_api(context):
context.incr = 0
context.sale_price = None
set_slippage(FixedSlippage())
def handle_data_api(context, data):
if context.incr == 0:
assert 0 not in context.portfolio.positions
else:
assert context.portfolio.positions[0]['amount'] == \
context.incr, "Orders not filled immediately."
assert context.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
context.incr += 1
order(sid(0), 1)
record(incr=context.incr)
###########################
# AlgoScripts as strings
noop_algo = """
# Noop algo
def initialize(context):
pass
def handle_data(context, data):
pass
"""
api_algo = """
from zipline.api import (order,
set_slippage,
FixedSlippage,
record,
sid)
def initialize(context):
context.incr = 0
context.sale_price = None
set_slippage(FixedSlippage())
def handle_data(context, data):
if context.incr == 0:
assert 0 not in context.portfolio.positions
else:
assert context.portfolio.positions[0]['amount'] == \
context.incr, "Orders not filled immediately."
assert context.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
context.incr += 1
order(sid(0), 1)
record(incr=context.incr)
"""
api_get_environment_algo = """
from zipline.api import get_environment, order, symbol
def initialize(context):
context.environment = get_environment()
handle_data = lambda context, data: order(symbol('TEST'), 1)
"""
api_symbol_algo = """
from zipline.api import (order,
symbol)
def initialize(context):
pass
def handle_data(context, data):
order(symbol('TEST'), 1)
"""
call_order_in_init = """
from zipline.api import (order)
def initialize(context):
order(0, 10)
pass
def handle_data(context, data):
pass
"""
access_portfolio_in_init = """
def initialize(context):
var = context.portfolio.cash
pass
def handle_data(context, data):
pass
"""
access_account_in_init = """
def initialize(context):
var = context.account.settled_cash
pass
def handle_data(context, data):
pass
"""
call_all_order_methods = """
from zipline.api import (order,
order_value,
order_percent,
order_target,
order_target_value,
order_target_percent,
sid)
def initialize(context):
pass
def handle_data(context, data):
order(sid(0), 10)
order_value(sid(0), 300)
order_percent(sid(0), .1)
order_target(sid(0), 100)
order_target_value(sid(0), 100)
order_target_percent(sid(0), .2)
"""
record_variables = """
from zipline.api import record
def initialize(context):
context.stocks = [0, 1]
context.incr = 0
def handle_data(context, data):
context.incr += 1
record(incr=context.incr)
"""
record_float_magic = """
from zipline.api import record
def initialize(context):
context.stocks = [0, 1]
context.incr = 0
def handle_data(context, data):
context.incr += 1
record(data=float('%s'))
"""
|
|
# KALPURNIA
from scrapy.spiders import BaseSpider
from urllib.request import urlopen
from urllib.parse import unquote
from urllib.parse import urlparse
from math import log10
from scrapy import Request
from scrapy.selector import Selector
from scrapy.http import HtmlResponse
from Kalpurnia.stemming.porter2 import stem
from Kalpurnia.items import Url, Posting
from scrapy.linkextractors import LinkExtractor #self.link_extractor = LinkExtractor()
import scrapy.exceptions
import scrapy
import os # clear console, delete/open files
import time # sleep(n seconds)
import json
from bs4 import BeautifulSoup
from langdetect import detect # used to detect the page language
import re
# from urlparse import urlparse # python2
# from collections import namedtuple
# from scrapy import signals
# from scrapy.xlib.pydispatch import dispatcher
def set_default(obj):
if isinstance(obj, set):
return list(obj)
raise TypeError
#import re
#pattern = re.compile("^([A-Z][0-9]+)*$")
#pattern.match(string)
class KalpurniaEmotionSpider(scrapy.Spider):
name = "KalpurniaCrawler_BeautySoup"
allowed_domains = ["localhost"]
URLsDiccc = {}
sentcsDiccc = {}
URL_Id = 1
termDiccc = {}
# DicccEntry = namedtuple('DicccEntry', 'termFrec pList')
start_urls = ['http://localhost/', ]
def __init__(self, **kw):
print("Spider running")
#filelist = [ f for f in os.listdir(".") if f.endswith(".json") ]
#for f in filelist:
# os.remove(f)
# dispatcher.connect(self.SpiderKilled, signals.spider_closed)
self.link_extractor = LinkExtractor()
os.system('cls' if os.name == 'nt' else 'clear')
print("Starting in 3 seconds")
time.sleep(1)
os.system('cls' if os.name == 'nt' else 'clear')
print("Starting in 2 seconds")
time.sleep(1)
os.system('cls' if os.name == 'nt' else 'clear')
print("Starting in 1 seconds")
time.sleep(1)
os.system('cls' if os.name == 'nt' else 'clear')
print("Parsing")
return
limit = 20
current = 0
def parse(self, response):
# extract all the links from the document
links = self.link_extractor.extract_links(response)
# add them to the list of requests (if they are not already there)
for x in links:
url = unquote(x.url)
url = url.split('&', 1)[0]
if not any(url == entry['url'] for entry in self.URLsDiccc.values()):
yield Request(url, callback=self.parse)
# get the html from the body
docText = BeautifulSoup(response.body, 'html.parser')
# kill all script and style elements
# some pages insert their text using scripts,
# so it is not the best approach to remove them,
# but for now, we will
for script in docText(["style", "script"]):
script.extract() # rip it out
# extract the text from the body html
docText = docText.get_text()
language = detect(docText)
if language != 'en':
print("\nIgnored document %s\n. Detected lang %s" %
(response.url, language))
return
# tokenize by sentences
#print (self.sentcsDiccc[self.URL_Id],self.URL_Id)
# ss = self.sid.polarity_scores(docText)
# for k in sorted(ss):
# print('{0}: {1}, '.format(k, ss[k]), end='')
# print()
# assign an ID to the url
# ... = unquote( ... ).encode('utf8') # it seems python 3 already saves as utf8
self.URLsDiccc[self.URL_Id] = { 'url': unquote(response.url), 'title': response.css('title::text').extract_first() } #.encode('utf8')
self.URL_Id += 1
# content = response.css('#content')
# taken from
# http://stackoverflow.com/questions/6181763/converting-a-string-to-a-list-of-words
# converts the body of the document to a list of words
wordList = re.sub(r"[^\w]", " ", docText).split()
# print (wordList)
# print ( content.css('p *::text').extract() )
# self.ALLsDiccc[response.url] = response.css('#content *::text').extract()
# for word in content.css('p *::text').re(r'\w+'):
for word in wordList:
# self.ALLsDiccc[response.url] =
# --- No Stemming nor lowercase
#stemmedword = word.encode('utf8')
# --- Stemming & lowercase
stemmedword = stem(word.lower()) #.encode('utf8')
# if the word is not in the postings dictionary,
# add it and map it to an empty dict
if stemmedword not in self.termDiccc:
self.termDiccc[stemmedword] = {}
# if the current url / index id is not in the
# dict associated with stemmedword, add it and set
# the term frequency to 1
# (the number of times that 'stemmedword' occurs in 'URL_Id-1')
if (self.URL_Id - 1) not in self.termDiccc[stemmedword]:
self.termDiccc[stemmedword][self.URL_Id - 1] = 1
else:
# increment the term frequency
self.termDiccc[stemmedword][self.URL_Id - 1] += 1
postingsWgts = {}
def closed(self, reason):
# Save Results
with open('urls.json', 'w') as fp:
json.dump(self.URLsDiccc, fp, indent=4)
with open('sentences.json', 'w') as fp:
json.dump(self.sentcsDiccc, fp, indent=4)
with open('postings.json', 'w') as fp:
json.dump(self.termDiccc, fp) # default=set_default, #indent = 4
# with open('TEST.json', 'w') as fp:
# json.dump(self.ALLsDiccc,fp, indent = 4)
N = len(self.URLsDiccc)
for ptng in self.termDiccc:
self.postingsWgts[ptng] = {}
# Storing DF in the list of URLs might be impractical
# self.postingsWgts[ptng]['DF'] = len(self.termDiccc[ptng])
DF = len(self.termDiccc[ptng])
for urlId in self.termDiccc[ptng]:
wtd = 1 + log10(self.termDiccc[ptng][urlId])
# we choose to store the df in every term url
self.postingsWgts[ptng][urlId] = {
'df': DF,
'count': self.termDiccc[ptng][urlId],
'weight': wtd,
'idf': log10(N / DF)
}
with open('postingsWgts.json', 'w') as fp:
json.dump(self.postingsWgts, fp)
print("\n\nFinal Results: ")
print("\n\tPostings dicc size: %d" % len(self.termDiccc))
print("\n\tURLs dicc size: %d" % len(self.URLsDiccc))
print("\n")
# with open('postings.json') as data_file:
# data = json.load(data_file)
# print data
# def SpiderKilled(self, spider):
# # second param is an instance of the spider about to be closed.
# print("\n\n spider shut down !!!!!!\n\n")
|
|
"""
Module containing PNF 2018 baseline tseep and thrush detectors.
The baseline detectors are variants of the PNF energy detector that have
precision-recall curves that are very similar to those of variants of the
Old Bird Tseep and Thrush detectors that omit the post-processing steps
(including the clip merging and suppression steps) of those detectors.
"""
import numpy as np
from vesper.pnf.pnf_energy_detector_1_0 import (
Detector, _FirFilter, _seconds_to_samples, _SeriesProcessor,
_SeriesProcessorChain)
from vesper.util.bunch import Bunch
class BaselineDetector(Detector):
"""
Baseline detector.
The baseline detector is derived from the PNF energy detector, but
replaces some computations (including for in-band power filtering and
transient-finding) with ones that emulate those of the Old Bird
detectors.
"""
def _create_power_filter(self, input_sample_rate):
filter_length = _seconds_to_samples(
self.settings.integration_time, input_sample_rate)
return _TimeIntegrator(
'Time Integrator', filter_length, input_sample_rate)
def _create_series_processors_aux(self):
s = self.settings
processors = [
_TransientFinder(
s.min_transient_duration, s.max_transient_duration),
_Clipper(
s.initial_clip_padding, s.clip_duration,
self._input_sample_rate)
]
return _SeriesProcessorChain(processors)
def _get_threshold_crossings(self, ratios, threshold):
x0 = ratios[:-1]
x1 = ratios[1:]
# Find indices where ratio rises above threshold.
t = threshold
rise_indices = np.where((x0 <= t) & (x1 > t))[0] + 1
# Find indices where ratio falls below threshold inverse.
t = 1 / t
fall_indices = np.where((x0 >= t) & (x1 < t))[0] + 1
# Convert indices to times.
rise_times = self._convert_indices_to_times(rise_indices)
fall_times = self._convert_indices_to_times(fall_indices)
# Tag rises and falls with booleans, combine, and sort.
return sorted(
[(t, True) for t in rise_times] +
[(t, False) for t in fall_times])
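# A small worked example (arbitrary numbers) of the crossing logic above with
# threshold t = 2: for the ratio series [1.0, 2.5, 1.8, 0.4], the ratio rises
# above t between indices 0 and 1 (a rise at index 1) and falls below
# 1 / t = 0.5 between indices 2 and 3 (a fall at index 3), so the crossings
# are [(time_of(1), True), (time_of(3), False)] after index-to-time conversion.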
class _TimeIntegrator(_FirFilter):
# An alternative to making this class an `_FirFilter` subclass would
# be to use the `np.cumsum` function to compute the cumulative sum
# of the input and then the difference between the result and a
# delayed version of the result. That approach is more efficient
# but it has numerical problems for sufficiently long inputs
# (the cumulative sum of the squared samples grows ever larger, but
# the samples do not, so you'll eventually start throwing away sample
# bits), so I have chosen not to use it. An alternative would be to use
# Cython or Numba or something like that to implement the integration
# in a way that is both faster and accurate for arbitrarily long inputs.
def __init__(self, name, integration_length, input_sample_rate):
coefficients = np.ones(integration_length) / integration_length
super().__init__(name, coefficients, input_sample_rate)
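# A minimal sketch (not used by the detector) of the cumulative-sum approach
# described in the comment above. It computes the same moving average as the
# FIR filter, but the running sum grows without bound, so for sufficiently
# long inputs the low-order bits of new samples are eventually lost.
def _integrate_with_cumsum_sketch(samples, integration_length):
    sums = np.concatenate(([0.], np.cumsum(samples)))
    return (sums[integration_length:] -
            sums[:-integration_length]) / integration_length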
_STATE_DOWN = 0
_STATE_UP = 1
_STATE_HOLDING = 2
class _TransientFinder(_SeriesProcessor):
"""Finds transients in a series of threshold crossings."""
def __init__(self, min_duration, max_duration):
self._min_duration = min_duration
self._max_duration = max_duration
self._state = _STATE_DOWN
self._start_time = 0
"""
time of start of current transient.
The value of this attribute only has meaning for the up and holding
states. It does not mean anything for the down state.
"""
def process(self, crossings):
transients = []
emit = transients.append
for time, rise in crossings:
if self._state == _STATE_DOWN:
if rise:
# rise while down
# Start new transient.
self._start_time = time
self._state = _STATE_UP
# Do nothing for fall while down.
elif self._state == _STATE_UP:
if rise:
# rise while up
if time == self._start_time + self._max_duration:
# rise right at end of maximal transient
# Emit maximal transient.
emit((self._start_time, self._max_duration))
# Return to down state. It seems a little odd that
# a rise would return us to the down state, but
# that is what happens in the original Old Bird
# detector (see line 252 of the original detector
# source code file splimflipflop.c), and we
# (somewhat arbitrarily) choose to emulate that
# here. This code should seldom execute on real
# inputs, since it should be rare for two
# consecutive rises to occur precisely
# `self._max_duration` apart.
self._state = _STATE_DOWN
elif time > self._start_time + self._max_duration:
# rise past end of maximal transient
# Emit maximal transient
emit((self._start_time, self._max_duration))
# Start new transient.
self._start_time = time
# Do nothing for rise before end of maximal transient.
else:
# fall while up
if time < self._start_time + self._min_duration:
# fall before end of minimal transient
self._state = _STATE_HOLDING
else:
# fall at or after end of minimal transient
duration = time - self._start_time
# Truncate transient if after end of maximal transient.
if duration > self._max_duration:
duration = self._max_duration
# Emit transient.
emit((self._start_time, duration))
self._state = _STATE_DOWN
else:
# holding after short transient
if rise:
# rise while holding after short transient
if time > self._start_time + self._min_duration:
# rise follows end of minimal transient by at least
# one non-transient sample
# Emit minimal transient.
emit((self._start_time, self._min_duration))
# Start new transient.
self._start_time = time
self._state = _STATE_UP
else:
# fall while holding after short transient
if time >= self._start_time + self._min_duration:
# fall at or after end of minimal transient
# Emit minimal transient.
emit((self._start_time, self._min_duration))
self._state = _STATE_DOWN
# Do nothing for fall before end of minimal transient.
return transients
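# A short usage sketch (times and durations are arbitrary example values, in
# the same units as the detector settings) of the state machine above: a rise
# at t=1.0 followed by a fall at t=1.25 yields a single transient (1.0, 0.25).
#
#     finder = _TransientFinder(min_duration=.1, max_duration=.4)
#     finder.process([(1.0, True), (1.25, False)])   # -> [(1.0, 0.25)]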
class _Clipper(_SeriesProcessor):
def __init__(self, initial_padding, duration, sample_rate):
self._initial_padding = initial_padding
self._duration = duration
self._sample_rate = sample_rate
self._length = _seconds_to_samples(duration, sample_rate)
def process(self, clips):
return [self._get_bounds(clip) for clip in clips]
# TODO: Should we do something special if the clip end index is past
# the signal end index? We currently don't worry about this.
def _get_bounds(self, clip):
start_time, _ = clip
start_time = max(start_time - self._initial_padding, 0)
start_index = _seconds_to_samples(start_time, self._sample_rate)
return (start_index, self._length)
_TSEEP_SETTINGS = Bunch(
window_type='hann',
window_size=.005, # seconds
hop_size=50, # percent
start_frequency=6000, # hertz
end_frequency=10000, # hertz
integration_time=.090, # seconds
delay=.020, # seconds
thresholds=[2], # dimensionless
min_transient_duration=.100, # seconds
max_transient_duration=.400, # seconds
initial_clip_padding=.050, # seconds
clip_duration=.300 # seconds
)
_THRUSH_SETTINGS = Bunch(
window_type='hann',
window_size=.005, # seconds
hop_size=50, # percent
start_frequency=2800, # hertz
end_frequency=5000, # hertz
integration_time=.180, # seconds
delay=.020, # seconds
thresholds=[1.3], # dimensionless
min_transient_duration=.100, # seconds
max_transient_duration=.400, # seconds
initial_clip_padding=.050, # seconds
clip_duration=.400 # seconds
)
class TseepDetector(BaselineDetector):
extension_name = 'PNF 2018 Baseline Tseep Detector 1.0'
def __init__(self, sample_rate, listener):
super().__init__(_TSEEP_SETTINGS, sample_rate, listener)
class ThrushDetector(BaselineDetector):
extension_name = 'PNF 2018 Baseline Thrush Detector 1.0'
def __init__(self, sample_rate, listener):
super().__init__(_THRUSH_SETTINGS, sample_rate, listener)
|
|
"""
Tests for rebinning histogram values.
"""
import numpy as np
from numpy.random import uniform
from numpy.testing import assert_allclose
from scipy.interpolate import splrep, splint
import uncertainties.unumpy as unp
import rebin
from bounded_splines import BoundedUnivariateSpline, BoundedRectBivariateSpline
# ---------------------------------------------------------------------------- #
# Tests for piecewise continuous rebinning
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
def test_x2_same_as_x1():
"""
x2 same as x1
"""
# old size
m = 6
# new size
n = 6
# bin edges
x_old = np.linspace(0., 1., m+1)
x_new = np.linspace(0., 1., n+1)
# some arbitrary distribution
y_old = 1. + np.sin(x_old[:-1]*np.pi) / np.ediff1d(x_old)
# rebin
y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')
assert_allclose(y_new, y_old)
# ---------------------------------------------------------------------------- #
def test_x2_surrounds_x1():
"""
x2 range surrounds x1 range
"""
# old size
m = 2
# new size
n = 3
# bin edges
x_old = np.linspace(0., 1., m+1)
x_new = np.linspace(-0.1, 1.2, n+1)
# some arbitrary distribution
y_old = 1. + np.sin(x_old[:-1]*np.pi) / np.ediff1d(x_old)
# rebin
y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')
# compute answer here to check rebin
y_old_ave = y_old / np.ediff1d(x_old)
y_new_here = [y_old_ave[0]*(x_new[1]-0.),
y_old_ave[0]*(x_old[1]-x_new[1]) + y_old_ave[1]*(x_new[2]-x_old[1]),
y_old_ave[1]*(x_old[-1]-x_new[-2])]
assert_allclose(y_new, y_new_here)
assert_allclose(y_new.sum(), y_old.sum())
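# A standalone sketch (not used by the tests) of the piecewise-constant
# redistribution that the assertions above check: each old bin's average
# density is integrated over its overlap with every new bin.
def _piecewise_constant_rebin_sketch(x_old, y_old, x_new):
    density = y_old / np.ediff1d(x_old)
    y_new = np.zeros(len(x_new) - 1)
    for i in range(len(x_new) - 1):
        for j in range(len(x_old) - 1):
            lo = max(x_new[i], x_old[j])
            hi = min(x_new[i + 1], x_old[j + 1])
            if hi > lo:
                y_new[i] += density[j] * (hi - lo)
    return y_new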
# ---------------------------------------------------------------------------- #
def test_x2_lower_than_x1():
"""
x2 range is completely lower than x1 range
"""
# old size
m = 2
# new size
n = 3
# bin edges
x_old = np.linspace(0., 1., m+1)
x_new = np.linspace(-0.2, -0.0, n+1)
# some arbitrary distribution
y_old = 1. + np.sin(x_old[:-1]*np.pi) / np.ediff1d(x_old)
# rebin
y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')
assert_allclose(y_new, [0.,0.,0.])
assert_allclose(y_new.sum(), 0.)
# ---------------------------------------------------------------------------- #
def test_x2_above_x1():
"""
x2 range is completely above x1 range
"""
# old size
m = 20
# new size
n = 30
# bin edges
x_old = np.linspace(0., 1., m+1)
x_new = np.linspace(1.2, 10., n+1)
# some arbitrary distribution
y_old = 1. + np.sin(x_old[:-1]*np.pi) / np.ediff1d(x_old)
# rebin
y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')
assert_allclose(y_new, np.zeros((n,)))
assert_allclose(y_new.sum(), 0.)
# ---------------------------------------------------------------------------- #
def test_x2_in_x1():
"""
x2 only has one bin, and it is surrounded by x1 range
"""
# old size
m = 4
# new size
n = 1
# bin edges
x_old = np.linspace(0., 1., m+1)
x_new = np.linspace(0.3, 0.65, n+1)
# some arbitrary distribution
y_old = 1. + np.sin(x_old[:-1]*np.pi) / np.ediff1d(x_old)
# rebin
y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')
# compute answer here to check rebin
y_old_ave = y_old / np.ediff1d(x_old)
y_new_here = ( y_old_ave[1]*(x_old[2]-x_new[0])
+ y_old_ave[2]*(x_new[1]-x_old[2]) )
assert_allclose(y_new, y_new_here)
# ---------------------------------------------------------------------------- #
def test_x2_in_x1_2():
"""
x2 has a couple of bins, each of which spans more than one original bin
"""
# old size
m = 10
# bin edges
x_old = np.linspace(0., 1., m+1)
x_new = np.array([0.25, 0.55, 0.75])
# some arbitrary distribution
y_old = 1. + np.sin(x_old[:-1]*np.pi) / np.ediff1d(x_old)
y_old = unp.uarray(y_old, 0.1*y_old*uniform((m,)))
# rebin
y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')
# compute answer here to check rebin
y_new_here = unp.uarray(np.zeros(2), np.zeros(2))
y_new_here[0] = 0.5 * y_old[2] + y_old[3] + y_old[4] + 0.5 * y_old[5]
y_new_here[1] = 0.5 * y_old[5] + y_old[6] + 0.5 * y_old[7]
assert_allclose(unp.nominal_values(y_new),
unp.nominal_values(y_new_here))
# standard deviation comparison
assert_allclose(unp.std_devs(y_new),
unp.std_devs(y_new_here))
# ---------------------------------------------------------------------------- #
def test_y1_uncertainties():
"""
x2 range surrounds x1 range, y1 has uncertainties
"""
# old size
m = 2
# new size
n = 3
# bin edges
x_old = np.linspace(0., 1., m+1)
x_new = np.linspace(-0.1, 1.2, n+1)
# some arbitrary distribution
y_old = 1. + np.sin(x_old[:-1]*np.pi) / np.ediff1d(x_old)
# with uncertainties
y_old = unp.uarray(y_old, 0.1*y_old*uniform((m,)))
# rebin
y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')
# compute answer here to check rebin
y_old_ave = y_old / np.ediff1d(x_old)
y_new_here = np.array(
[y_old_ave[0]*(x_new[1]-0.),
y_old_ave[0]*(x_old[1]-x_new[1]) + y_old_ave[1]*(x_new[2]-x_old[1]),
y_old_ave[1]*(x_old[-1]-x_new[-2])]
)
# mean or nominal value comparison
assert_allclose(unp.nominal_values(y_new),
unp.nominal_values(y_new_here))
# standard deviation comparison
assert_allclose(unp.std_devs(y_new),
unp.std_devs(y_new_here))
assert_allclose(unp.nominal_values(y_new).sum(),
unp.nominal_values(y_new_here).sum())
# ---------------------------------------------------------------------------- #
# Tests for cubic-spline rebinning
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
def test_x2_surrounds_x1_with_constant_distribution():
"""
x2 domain completely surrounds x1 domain
"""
# old size
m = 20
# new size
n = 30
# bin edges
x_old = np.linspace(0., 1., m+1)
x_new = np.linspace(-0.5, 1.5, n+1)
# constant spline
mms_spline = BoundedUnivariateSpline([0,.1,.2,1], [1,1,1,1], s=0.)
y_old = np.array(
[ mms_spline.integral(x_old[i],x_old[i+1]) for i in range(m) ])
y_new_mms = np.array(
[ mms_spline.integral(x_new[i],x_new[i+1]) for i in range(n) ])
# rebin
y_new = rebin.rebin(x_old, y_old, x_new, interp_kind=3)
assert_allclose(y_new, y_new_mms)
# ---------------------------------------------------------------------------- #
def test_x2_left_overlap_x1_with_constant_distribution():
"""
x2 domain overlaps x1 domain from the left
"""
# old size
m = 20
# new size
n = 30
# bin edges
x_old = np.linspace(0., 1., m+1)
x_new = np.linspace(-0.75, 0.45, n+1)
# constant spline
mms_spline = BoundedUnivariateSpline([0,.1,.2,1], [1,1,1,1], s=0.)
y_old = np.array(
[ mms_spline.integral(x_old[i],x_old[i+1]) for i in range(m) ])
y_new_mms = np.array(
[ mms_spline.integral(x_new[i],x_new[i+1]) for i in range(n) ])
# rebin
y_new = rebin.rebin(x_old, y_old, x_new, interp_kind=3)
assert_allclose(y_new, y_new_mms)
# ---------------------------------------------------------------------------- #
def test_x2_right_overlap_x1_with_constant_distribution():
"""
x2 domain overlaps x1 domain from the right
"""
# old size
m = 20
# new size
n = 30
# bin edges
x_old = np.linspace(0., 1., m+1)
x_new = np.linspace(0.95, 1.05, n+1)
# constant spline
mms_spline = BoundedUnivariateSpline([0,.1,.2,1], [1,1,1,1], s=0.)
y_old = np.array(
[ mms_spline.integral(x_old[i],x_old[i+1]) for i in range(m) ])
y_new_mms = np.array(
[ mms_spline.integral(x_new[i],x_new[i+1]) for i in range(n) ])
# rebin
y_new = rebin.rebin(x_old, y_old, x_new, interp_kind=3)
assert_allclose(y_new, y_new_mms, atol=1e-15)
# ---------------------------------------------------------------------------- #
def test_x1_surrounds_x2_with_constant_distribution():
"""
x1 domain surrounds x2
"""
# old size
m = 20
# new size
n = 30
# bin edges
x_old = np.linspace(0., 1., m+1)
x_new = np.linspace(0.05, 0.26, n+1)
# constant spline
mms_spline = BoundedUnivariateSpline([0,.1,.2,1], [1,1,1,1], s=0.)
y_old = np.array(
[ mms_spline.integral(x_old[i],x_old[i+1]) for i in range(m) ])
y_new_mms = np.array(
[ mms_spline.integral(x_new[i],x_new[i+1]) for i in range(n) ])
# rebin
y_new = rebin.rebin(x_old, y_old, x_new, interp_kind=3)
assert_allclose(y_new, y_new_mms)
# ---------------------------------------------------------------------------- #
def test_x2_surrounds_x1_sine_spline():
"""
x2 overlaps x1 from the left: it extends below x1 and ends inside it.
A sine-shaped distribution is used to build the reference spline.
"""
# old size
m = 5
# new size
n = 6
# bin edges
x_old = np.linspace(0., 1., m+1)
x_new = np.array([-.3, -.09, 0.11, 0.14, 0.2, 0.28, 0.73])
subbins = np.array([-.3, -.09, 0., 0.11, 0.14, 0.2, 0.28, 0.4, 0.6, 0.73])
y_old = 1.+np.sin(x_old[:-1]*np.pi)
# compute spline ----------------------------------
x_mids = x_old[:-1] + 0.5*np.ediff1d(x_old)
xx = np.hstack([x_old[0], x_mids, x_old[-1]])
yy = np.hstack([y_old[0], y_old, y_old[-1]])
# build spline
spl = splrep(xx, yy)
area_old = np.array(
[ splint(x_old[i],x_old[i+1], spl) for i in range(m) ])
# computing subbin areas
area_subbins = np.zeros((subbins.size-1,))
for i in range(area_subbins.size):
a, b = subbins[i:i+2]
a = max([a,x_old[0]])
b = min([b,x_old[-1]])
if b>a:
area_subbins[i] = splint(a, b, spl)
# summing subbin contributions in y_new_ref
y_new_ref = np.zeros((x_new.size-1,))
y_new_ref[1] = y_old[0] * area_subbins[2] / area_old[0]
y_new_ref[2] = y_old[0] * area_subbins[3] / area_old[0]
y_new_ref[3] = y_old[0] * area_subbins[4] / area_old[0]
y_new_ref[4] = y_old[1] * area_subbins[5] / area_old[1]
y_new_ref[5] = y_old[1] * area_subbins[6] / area_old[1]
y_new_ref[5] += y_old[2] * area_subbins[7] / area_old[2]
y_new_ref[5] += y_old[3] * area_subbins[8] / area_old[3]
# call rebin function
y_new = rebin.rebin(x_old, y_old, x_new, interp_kind=3)
assert_allclose(y_new, y_new_ref)
# ---------------------------------------------------------------------------- #
def test_y1_uncertainties_spline_with_constant_distribution():
"""
"""
# old size
m = 5
# new size
n = 6
# bin edges
x_old = np.linspace(0., 1., m+1)
x_new = np.array([-.3, -.09, 0.11, 0.14, 0.2, 0.28, 0.73])
subbins = np.array([-.3, -.09, 0., 0.11, 0.14, 0.2, 0.28, 0.4, 0.6, 0.73])
y_old = 1.+np.sin(x_old[:-1]*np.pi)
# compute spline ----------------------------------
x_mids = x_old[:-1] + 0.5*np.ediff1d(x_old)
xx = np.hstack([x_old[0], x_mids, x_old[-1]])
yy = np.hstack([y_old[0], y_old, y_old[-1]])
# build spline
spl = splrep(xx, yy)
area_old = np.array(
[ splint(x_old[i],x_old[i+1], spl) for i in range(m) ])
# with uncertainties
y_old = unp.uarray(y_old, 0.1*y_old*uniform((m,)))
# computing subbin areas
area_subbins = np.zeros((subbins.size-1,))
for i in range(area_subbins.size):
a, b = subbins[i:i+2]
a = max([a,x_old[0]])
b = min([b,x_old[-1]])
if b>a:
area_subbins[i] = splint(a, b, spl)
# summing subbin contributions in y_new_ref
a = np.zeros((x_new.size-1,))
y_new_ref = unp.uarray(a,a)
y_new_ref[1] = y_old[0] * area_subbins[2] / area_old[0]
y_new_ref[2] = y_old[0] * area_subbins[3] / area_old[0]
y_new_ref[3] = y_old[0] * area_subbins[4] / area_old[0]
y_new_ref[4] = y_old[1] * area_subbins[5] / area_old[1]
y_new_ref[5] = y_old[1] * area_subbins[6] / area_old[1]
y_new_ref[5] += y_old[2] * area_subbins[7] / area_old[2]
y_new_ref[5] += y_old[3] * area_subbins[8] / area_old[3]
# call rebin function
y_new = rebin.rebin(x_old, y_old, x_new, interp_kind=3)
# mean or nominal value comparison
assert_allclose(unp.nominal_values(y_new),
unp.nominal_values(y_new_ref))
# standard deviation comparison
assert_allclose(unp.std_devs(y_new),
unp.std_devs(y_new_ref))
# ---------------------------------------------------------------------------- #
# Tests for 2d rebinning
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
def test_2d_same():
"""
x1, y1 == x2, y2 implies z1 == z2
2d
"""
# old size
m = 20
n = 30
# bin edges
x_old = np.linspace(0., 1., m+1)
y_old = np.linspace(-0.5, 1.5, n+1)
z_old = np.random.random((m,n))
# rebin
z_new = rebin.rebin2d(x_old, y_old, z_old, x_old, y_old)
assert_allclose(z_old, z_new)
# ---------------------------------------------------------------------------- #
def test_2d_constant_distribution():
"""
various new domains with a constant underlying distribution
2d
"""
# old size
m = 8
n = 11
# new size
p = 5
q = 14
new_bounds = [ (0., 1., -1.5, 1.7),
(0., 1., -1.5, 0.7),
(0., 1., -1.5, -0.7),
(-1., 1.5, -1.5, 1.7),
(-1., 0.5, -1., 0.5),
(0.1, 0.6, 0.1, 0.5),
(0.01, 0.02, -10.0, 20.7)]
for (a,b,c,d) in new_bounds:
# bin edges
x_old = np.linspace(0., 1., m+1)
y_old = np.linspace(-0.5, 1.5, n+1)
x_new = np.linspace(a, b, p+1)
y_new = np.linspace(c, d, q+1)
# constant spline
z_old = np.ones((m+1,n+1))
mms_spline = BoundedRectBivariateSpline(x_old, y_old, z_old, s=0.)
z_old = np.zeros((m,n))
for i in range(m):
for j in range(n):
z_old[i,j] = mms_spline.integral(x_old[i], x_old[i+1],
y_old[j], y_old[j+1])
z_new_mms = np.zeros((p,q))
for i in range(p):
for j in range(q):
z_new_mms[i,j] = mms_spline.integral(x_new[i], x_new[i+1],
y_new[j], y_new[j+1])
# rebin
z_new = rebin.rebin2d(x_old, y_old, z_old, x_new, y_new)
assert_allclose(z_new, z_new_mms)
def test_GH9():
x_old = np.array([1.5, 2.5, 3.5, 4.5, 5.5, 6.5])
y_old = np.array([10, 10, 10, 10, 10])
x_new = np.array([1.7, 2.27332857, 2.84665714, 3.41998571,
3.99331429, 4.56664286])
y_new = rebin.rebin(x_old, y_old, x_new)
assert_allclose(y_new,
[5.7332857] * 5)
# with uncertainties
y_old = np.array([11., 12., 13., 14., 15.])
y_old = unp.uarray(y_old, 0.1 * y_old)
# rebin
y_new = rebin.rebin_piecewise_constant(x_old, y_old, x_new)
# compute answer here to check rebin
y_old_ave = y_old / np.diff(x_old)
y_new_here = np.array(
[y_old_ave[0] * (x_new[1] - x_new[0]),
y_old_ave[0] * (x_old[1] - x_new[1]) +
y_old_ave[1] * (x_new[2] - x_old[1]),
y_old_ave[1] * (x_new[3] - x_new[2]),
y_old_ave[1] * (x_old[2] - x_new[3]) +
y_old_ave[2] * (x_new[4] - x_old[2]),
y_old_ave[3] * (x_new[5] - x_old[3]) +
y_old_ave[2] * (x_old[3] - x_new[4])])
# mean or nominal value comparison
# assert_allclose(unp.nominal_values(y_new),
# unp.nominal_values(y_new_here))
# standard deviation comparison
assert_allclose(unp.std_devs(y_new),
unp.std_devs(y_new_here))
|
|
"Utilities for loading models and the modules that contain them."
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.datastructures import SortedDict
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
import imp
import sys
import os
import threading
__all__ = ('get_apps', 'get_app', 'get_models', 'get_model', 'register_models',
'load_app', 'app_cache_ready')
class AppCache(object):
"""
A cache that stores installed applications and their models. Used to
provide reverse-relations and for app introspection (e.g. admin).
"""
# Use the Borg pattern to share state between all instances. Details at
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66531.
__shared_state = dict(
# Keys of app_store are the model modules for each application.
app_store = SortedDict(),
# Mapping of installed app_labels to model modules for that app.
app_labels = {},
# Mapping of app_labels to a dictionary of model names to model code.
# May contain apps that are not installed.
app_models = SortedDict(),
# Mapping of app_labels to errors raised when trying to import the app.
app_errors = {},
# -- Everything below here is only used when populating the cache --
loaded = False,
handled = {},
postponed = [],
nesting_level = 0,
write_lock = threading.RLock(),
_get_models_cache = {},
)
def __init__(self):
self.__dict__ = self.__shared_state
def _populate(self):
"""
Fill in all the cache information. This method is threadsafe, in the
sense that every caller will see the same state upon return, and if the
cache is already initialised, it does no work.
"""
if self.loaded:
return
self.write_lock.acquire()
try:
if self.loaded:
return
for app_name in settings.INSTALLED_APPS:
if app_name in self.handled:
continue
self.load_app(app_name, True)
if not self.nesting_level:
for app_name in self.postponed:
self.load_app(app_name)
self.loaded = True
finally:
self.write_lock.release()
def _label_for(self, app_mod):
"""
Return app_label for given models module.
"""
return app_mod.__name__.split('.')[-2]
def load_app(self, app_name, can_postpone=False):
"""
Loads the app with the provided fully qualified name, and returns the
model module.
"""
self.handled[app_name] = None
self.nesting_level += 1
app_module = import_module(app_name)
try:
models = import_module('.models', app_name)
except ImportError:
self.nesting_level -= 1
# If the app doesn't have a models module, we can just ignore the
# ImportError and return no models for it.
if not module_has_submodule(app_module, 'models'):
return None
# But if the app does have a models module, we need to figure out
# whether to suppress or propagate the error. If can_postpone is
# True then it may be that the package is still being imported by
# Python and the models module isn't available yet. So we add the
# app to the postponed list and we'll try it again after all the
# recursion has finished (in populate). If can_postpone is False
# then it's time to raise the ImportError.
else:
if can_postpone:
self.postponed.append(app_name)
return None
else:
raise
self.nesting_level -= 1
if models not in self.app_store:
self.app_store[models] = len(self.app_store)
self.app_labels[self._label_for(models)] = models
return models
def app_cache_ready(self):
"""
Returns true if the model cache is fully populated.
Useful for code that wants to cache the results of get_models() for
themselves once it is safe to do so.
"""
return self.loaded
def get_apps(self):
"Returns a list of all installed modules that contain models."
self._populate()
# Ensure the returned list is always in the same order (with new apps
# added at the end). This avoids unstable ordering on the admin app
# list page, for example.
apps = [(v, k) for k, v in self.app_store.items()]
apps.sort()
return [elt[1] for elt in apps]
def get_app(self, app_label, emptyOK=False):
"""
Returns the module containing the models for the given app_label. If
the app has no models in it and 'emptyOK' is True, returns None.
"""
self._populate()
self.write_lock.acquire()
try:
for app_name in settings.INSTALLED_APPS:
if app_label == app_name.split('.')[-1]:
mod = self.load_app(app_name, False)
if mod is None:
if emptyOK:
return None
else:
return mod
raise ImproperlyConfigured("App with label %s could not be found" % app_label)
finally:
self.write_lock.release()
def get_app_errors(self):
"Returns the map of known problems with the INSTALLED_APPS."
self._populate()
return self.app_errors
def get_models(self, app_mod=None,
include_auto_created=False, include_deferred=False,
only_installed=True):
"""
Given a module containing models, returns a list of the models.
Otherwise returns a list of all installed models.
By default, auto-created models (i.e., m2m models without an
explicit intermediate table) are not included. However, if you
specify include_auto_created=True, they will be.
By default, models created to satisfy deferred attribute
queries are *not* included in the list of models. However, if
you specify include_deferred, they will be.
"""
cache_key = (app_mod, include_auto_created, include_deferred, only_installed)
try:
return self._get_models_cache[cache_key]
except KeyError:
pass
self._populate()
if app_mod:
if app_mod in self.app_store:
app_list = [self.app_models.get(self._label_for(app_mod),
SortedDict())]
else:
app_list = []
else:
if only_installed:
app_list = [self.app_models.get(app_label, SortedDict())
for app_label in self.app_labels.iterkeys()]
else:
app_list = self.app_models.itervalues()
model_list = []
for app in app_list:
model_list.extend(
model for model in app.values()
if ((not model._deferred or include_deferred) and
(not model._meta.auto_created or include_auto_created))
)
self._get_models_cache[cache_key] = model_list
return model_list
def get_model(self, app_label, model_name,
seed_cache=True, only_installed=True):
"""
Returns the model matching the given app_label and case-insensitive
model_name.
Returns None if no model is found.
"""
if seed_cache:
self._populate()
if only_installed and app_label not in self.app_labels:
return None
return self.app_models.get(app_label, SortedDict()).get(model_name.lower())
def register_models(self, app_label, *models):
"""
Register a set of models as belonging to an app.
"""
for model in models:
# Store as 'name: model' pair in a dictionary
# in the app_models dictionary
model_name = model._meta.object_name.lower()
model_dict = self.app_models.setdefault(app_label, SortedDict())
if model_name in model_dict:
# The same model may be imported via different paths (e.g.
# appname.models and project.appname.models). We use the source
# filename as a means to detect identity.
fname1 = os.path.abspath(sys.modules[model.__module__].__file__)
fname2 = os.path.abspath(sys.modules[model_dict[model_name].__module__].__file__)
# Since the filename extension could be .py the first time and
# .pyc or .pyo the second time, ignore the extension when
# comparing.
if os.path.splitext(fname1)[0] == os.path.splitext(fname2)[0]:
continue
model_dict[model_name] = model
self._get_models_cache.clear()
cache = AppCache()
# These methods were always module level, so are kept that way for backwards
# compatibility.
get_apps = cache.get_apps
get_app = cache.get_app
get_app_errors = cache.get_app_errors
get_models = cache.get_models
get_model = cache.get_model
register_models = cache.register_models
load_app = cache.load_app
app_cache_ready = cache.app_cache_ready
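# A minimal usage sketch (hypothetical 'news' app label and 'Article' model)
# of the module-level helpers exposed above; not called anywhere in Django.
def _example_usage():
    article_model = get_model('news', 'article')  # None if app/model unknown
    if article_model is None:
        return get_apps()
    return get_models(include_auto_created=True)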
|
|
#!/usr/bin/env python
# ASCii VMSS Console - The power is in the terminal...
"""
Copyright (c) 2016, Marcelo Leal
Description: The power is in the terminal...
License: MIT (see LICENSE.txt file for details)
"""
from azure import *
import json
import platform
from os.path import expanduser
#Linux or Windows?
oursystem = platform.system();
#Our Home...
HOMEUSER = expanduser("~")
HOMEDIR = HOMEUSER + "/.asciivmssdashboard"
# Load Azure app defaults
filepresent = 1
try:
with open(HOMEDIR + '/asciivmssdashboard.json') as configFile:
configData = json.load(configFile)
except (IOError, FileNotFoundError):
# ---> In case we do not find our asciivmssdashboard.json config file, we will run in demo mode...
filepresent = 0
#sys.exit()
try:
demoEnabled = configData['demoEnabled']
except:
demoEnabled = "Yes"
# ---> "Missing 'demoEnabled' configuration parameter. So, to keep it simple, we will run in demo mode..."
# ---> "Use the asciivmssdashboard.json.tmpl file as a template to fill in your custom values..."
try:
animationEnabled = configData['animationEnabled']
if filepresent:
configFile.close()
except:
if (oursystem == "Linux"):
animationEnabled = "Yes"
else:
animationEnabled = "No"
# ---> "Missing 'animationEnabled' configuration parameter. Because we like it and was not simple to implement, we will assume you also like it..."
# ---> "Use the asciivmssdashboard.json.tmpl file as a template to fill in your custom values..."
if filepresent:
configFile.close()
#sys.exit()
# app state variables
vmssProperties = [];
vmssVmProperties = [];
vmssVmInstanceView = '';
access_token="";
# Curses...
window_continents = {'northandcentralamerica':0,'southamerica':0,'europeandasia':0,'africa':0,'oceania':0};
panel_continents = {'northandcentralamerica':0,'southamerica':0,'europeandasia':0,'africa':0,'oceania':0};
window_information = {'vmss_info':0,'system':0,'status':0,'virtualmachines':0,'vm':0,'monitor':0,'usage':0,'gauge':0,'gaugeas':0,'gaugerc':0,'gaugevm':0,'gaugess':0,'log':0,'insightsone':0,'insightstwo':0,'exit':0,'error':0,'logo':0,'cmd':0,'help':0};
panel_information = {'vmss_info':0,'system':0,'status':0,'virtualmachines':0,'vm':0,'monitor':0,'usage':0,'gauge':0,'gaugeas':0,'gaugerc':0,'gaugevm':0,'gaugess':0,'log':0,'insightsone':0,'insightstwo':0,'exit':0,'error':0,'logo':0,'cmd':0,'help':0};
#Draw Helper...
def draw_helper(geo, termsize):
#First we create the window in the right location...
if (animationEnabled.lower() == 'yes'):
if (geo == 'northandcentralamerica'):
window_continents['northandcentralamerica'] = create_window(26, 86, 1, termsize[1] + 1);
if (geo == 'southamerica'):
window_continents['southamerica'] = create_window(20, 27, 26, termsize[1] + 1);
if (geo == 'europeandasia'):
window_continents['europeandasia'] = create_window(26, 109, termsize[0] + 1, 125);
if (geo == 'africa'):
window_continents['africa'] = create_window(20, 38, termsize[0] + 1, 121);
if (geo == 'oceania'):
window_continents['oceania'] = create_window(15, 48, termsize[0] + 1, 180);
else:
if (geo == 'northandcentralamerica'):
window_continents['northandcentralamerica'] = create_window(26, 86, 1, 39);
if (geo == 'southamerica'):
window_continents['southamerica'] = create_window(20, 27, 26, 86);
if (geo == 'europeandasia'):
window_continents['europeandasia'] = create_window(26, 109, 3, 125);
if (geo == 'africa'):
window_continents['africa'] = create_window(20, 38, 19, 121);
if (geo == 'oceania'):
window_continents['oceania'] = create_window(15, 48, 28, 180);
#Create the other panels...
panel_continents[geo] = new_panel(window_continents[geo]);
draw_map(window_continents[geo], geo);
mark_regions_map(window_continents[geo], geo);
#Do the animation if needed...
if (animationEnabled.lower() == 'yes'):
if (geo == 'northandcentralamerica'):
win_animation(panel_continents['northandcentralamerica'], termsize, 1, 38);
if (geo == 'southamerica'):
win_animation(panel_continents['southamerica'], termsize, 26, 86);
if (geo == 'europeandasia'):
win_animation(panel_continents['europeandasia'], termsize, 3, 125);
if (geo == 'africa'):
win_animation(panel_continents['africa'], termsize, 19, 121);
if (geo == 'oceania'):
win_animation(panel_continents['oceania'], termsize, 28, 180);
def main(): #{
#Initialize...
COLSTART=100; SZ = 0;
oursystem = platform.system();
stdscr = initscr();
modo = "REAL";
if (demoEnabled.lower() == 'yes'):
modo = "DEMO";
if (oursystem == "Disabledbecausewasconsuming1proconLinux"):
# Non-block when waiting for getch (cmd prompt).
# This does not work on Windows, so we will not be able to exit nicely...
stdscr.nodelay(1);
#Just a workaround for the SSL warning...
cur_version = sys.version_info;
if (cur_version.major == 2):
requests.packages.urllib3.disable_warnings()
termsize = getmaxyx(stdscr);
if (termsize[0] >= 55 and termsize[1] >= 235):
SZ = 1;
else:
if (oursystem == "Linux"):
errnr = resize_terminal();
SZ = errnr;
else:
SZ = 0;
if (SZ == 0):
endwin();
print ("You need a terminal at least 55x235...");
print ("If you are running this application on Linux, you can resize your terminal using: resize -s 55 235.");
sys.exit(1);
if (not has_colors()):
print ("You need to have colors")
sys.exit(1);
start_color();
set_colors();
noecho();
curs_set(False);
keypad(stdscr,True);
#Our main window with margin and our title...
#newwin(lines, columns, startline, startcolumn);
window = newwin(0, 0, 0, 0);
box(window);
panel = new_panel(window);
#Window Headers...
write_str(window, 0, 5, "| ASCii VMSS Dashboard - Version: 2.0 |");
write_str(window, 0, 50, " PYTHON Version: ");
write_str(window, 0, 67, cur_version.major);
write_str(window, 0, 68, "x ");
write_str(window, 0, 77, "| Platform: ");
write_str(window, 0, 89, oursystem);
write_str(window, 0, 89 + len(oursystem), " |");
write_str(window, 0, 108, "| Execution Mode: ");
if modo == "REAL":
write_str_color(window, 0, 126, modo, 6, 0);
else:
write_str(window, 0, 126, modo);
write_str(window, 0, 126 + len(modo), " |");
write_str(window, 0, termsize[1] - 28, " Window Size: ");
write_str(window, 0, termsize[1] - 14, str(termsize));
#Here starts our game...
#Continents create_window(lines, columns, startline, startcolumn)
# NORTHAMERICA
draw_helper('northandcentralamerica', termsize);
# SOUTHAMERICA
draw_helper('southamerica', termsize);
# EUROPEANDASIA
draw_helper('europeandasia', termsize);
# AFRICA
draw_helper('africa', termsize);
# OCEANIA
draw_helper('oceania', termsize);
#Create all information windows...
window_information['vmss_info'] = create_window(6, 90, 48, 105);
panel_information['vmss_info'] = new_panel(window_information['vmss_info']);
box(window_information['vmss_info']);
write_str_color(window_information['vmss_info'], 0, 5, " GENERAL INFO ", 3, 0);
window_information['system'] = create_window(6, 38, 48, 195);
box(window_information['system']);
panel_information['system'] = new_panel(window_information['system']);
write_str_color(window_information['system'], 0, 5, " SYSTEM INFO ", 3, 0);
window_information['status'] = create_window(3, 36, 1, 2);
panel_information['status'] = new_panel(window_information['status']);
box(window_information['status']);
write_str_color(window_information['status'], 0, 5, " STATUS ", 3, 1);
write_str(window_information['status'], 1, 2, "Updated at");
window_information['virtualmachines'] = create_window(32, 54, 22, 2);
panel_information['virtualmachines'] = new_panel(window_information['virtualmachines']);
box(window_information['virtualmachines']);
write_str_color(window_information['virtualmachines'], 0, 18, " VIRTUAL MACHINES ", 3, 0);
create_virtualmachines_form(window_information['virtualmachines']);
window_information['monitor'] = create_window(3, 54, 19, 2);
box(window_information['monitor']);
panel_information['monitor'] = new_panel(window_information['monitor']);
write_str_color(window_information['monitor'], 0, 5, " VM UPDATE MONITOR ", 3, 0);
write_str_color(window_information['monitor'], 1, 2, "Processing Virtual Machine: ", 4, 1);
window_information['usage'] = create_window(15, 36, 4, 2);
box(window_information['usage']);
panel_information['usage'] = new_panel(window_information['usage']);
write_str_color(window_information['usage'], 0, 5, " COMPUTE USAGE ", 3, 0);
create_usage_form(window_information['usage']);
window_information['log'] = create_window(18, 195, 1, 38);
panel_information['log'] = new_panel(window_information['log']);
hide_panel(panel_information['log']);
box(window_information['log']);
write_str_color(window_information['log'], 0, 5, " LOG ", 3, 0);
window_information['insightsone'] = create_window(15, 177, 19, 56);
panel_information['insightsone'] = new_panel(window_information['insightsone']);
hide_panel(panel_information['insightsone']);
box(window_information['insightsone']);
write_str_color(window_information['insightsone'], 0, 5, " INSIGHTS METRIC #1 ", 3, 0);
window_information['insightstwo'] = create_window(11, 144, 34, 89);
panel_information['insightstwo'] = new_panel(window_information['insightstwo']);
hide_panel(panel_information['insightstwo']);
box(window_information['insightstwo']);
write_str_color(window_information['insightstwo'], 0, 5, " INSIGHTS METRIC #2 ", 3, 0);
window_azure = create_window(3, 16, 45, 89);
panel_azure = new_panel(window_azure);
write_str_color(window_azure, 1, 1, " AZURE ", 3, 0);
window_information['logo'] = create_window(7, 16, 47, 89);
panel_information['logo'] = new_panel(window_information['logo']);
box(window_information['logo']);
draw_logo(window_information['logo']);
window_information['exit'] = create_window(8, 57, 22, 88);
panel_information['exit'] = new_panel(window_information['exit']);
hide_panel(panel_information['exit']);
box(window_information['exit']);
write_str_color(window_information['exit'], 3, 5, "Waiting for Console Update threads to close...", 4, 1);
window_information['error'] = create_window(3, 128, 42, 105);
panel_information['error'] = new_panel(window_information['error']);
hide_panel(panel_information['error']);
box(window_information['error']);
write_str_color(window_information['error'], 1, 2, " ", 9, 0);
write_str_color(window_information['error'], 1, 9, " ERROR ", 4, 1);
write_str_color(window_information['error'], 1, 16, " ", 9, 0);
window_information['cmd'] = create_window(3, 128, 45, 105);
panel_information['cmd'] = new_panel(window_information['cmd']);
box(window_information['cmd']);
write_str_color(window_information['cmd'], 0, 5, " PROMPT ", 3, 0);
write_str_color(window_information['cmd'], 1, 3, ">", 4, 1);
create_prompt_form(window_information['cmd']);
window_information['help'] = create_window(20, 33, 34, 56);
box(window_information['help']);
panel_information['help'] = new_panel(window_information['help']);
hide_panel(panel_information['help']);
write_str_color(window_information['help'], 0, 5, " HELP ", 3, 0);
create_help_form(window_information['help']);
window_information['vm'] = create_window(20, 33, 34, 56);
box(window_information['vm']);
panel_information['vm'] = new_panel(window_information['vm']);
hide_panel(panel_information['vm']);
write_str_color(window_information['vm'], 0, 5, " VM ", 3, 0);
create_vm_form(window_information['vm']);
#Gauge Container Window...
window_information['gauge'] = create_window(7, 34, 11, 3);
box(window_information['gauge']);
panel_information['gauge'] = new_panel(window_information['gauge']);
#Gauge Windows...
y = 5;
gaugeaux = "gaugeas gaugerc gaugevm gaugess";
for x in gaugeaux.split():
window_information[x] = create_window(5, 6, 12, y);
box(window_information[x]);
panel_information[x] = new_panel(window_information[x]);
y += 8;
y = 3;
gaugeaux = "AS RC VM SS";
for x in gaugeaux.split():
write_str_color(window_information['gauge'], 6, y, " ", 3, 0);
write_str_color(window_information['gauge'], 6, y+1, x, 3, 0);
write_str_color(window_information['gauge'], 6, y+3, " ", 3, 0);
y += 8;
#Update the whole thing...
update_panels();
doupdate();
#Are we for real??
#Our thread that updates all VMSS info (Default Refresh Interval: 5)...
vmss_monitor_thread(window_information, panel_information, window_continents, panel_continents, demoEnabled);
endwin();
return 0;
if (__name__ == "__main__"): #{
main();
#}
|
|
from __future__ import unicode_literals
import re
import tempfile
from django.contrib.gis import gdal
from django.contrib.gis.db.models import Extent, MakeLine, Union
from django.contrib.gis.geos import (
GeometryCollection, GEOSGeometry, LinearRing, LineString, Point, Polygon,
fromstr,
)
from django.core.management import call_command
from django.db import connection
from django.test import TestCase, ignore_warnings, skipUnlessDBFeature
from django.utils import six
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango21Warning,
)
from ..utils import no_oracle, oracle, postgis, spatialite
from .models import (
City, Country, Feature, MinusOneSRID, NonConcreteModel, PennsylvaniaCity,
State, Track,
)
def postgis_bug_version():
spatial_version = getattr(connection.ops, "spatial_version", (0, 0, 0))
return spatial_version and (2, 0, 0) <= spatial_version <= (2, 0, 1)
@skipUnlessDBFeature("gis_enabled")
class GeoModelTest(TestCase):
fixtures = ['initial']
def test_fixtures(self):
"Testing geographic model initialization from fixtures."
# Ensuring that data was loaded from initial data fixtures.
self.assertEqual(2, Country.objects.count())
self.assertEqual(8, City.objects.count())
self.assertEqual(2, State.objects.count())
def test_proxy(self):
"Testing Lazy-Geometry support (using the GeometryProxy)."
# Testing on a Point
pnt = Point(0, 0)
nullcity = City(name='NullCity', point=pnt)
nullcity.save()
# Making sure TypeError is thrown when trying to set with an
# incompatible type.
for bad in [5, 2.0, LineString((0, 0), (1, 1))]:
try:
nullcity.point = bad
except TypeError:
pass
else:
self.fail('Should throw a TypeError')
# Now setting with a compatible GEOS Geometry, saving, and ensuring
# the save took, notice no SRID is explicitly set.
new = Point(5, 23)
nullcity.point = new
# Ensuring that the SRID is automatically set to that of the
# field after assignment, but before saving.
self.assertEqual(4326, nullcity.point.srid)
nullcity.save()
# Ensuring the point was saved correctly after saving
self.assertEqual(new, City.objects.get(name='NullCity').point)
# Setting the X and Y of the Point
nullcity.point.x = 23
nullcity.point.y = 5
# Checking assignments pre & post-save.
self.assertNotEqual(Point(23, 5), City.objects.get(name='NullCity').point)
nullcity.save()
self.assertEqual(Point(23, 5), City.objects.get(name='NullCity').point)
nullcity.delete()
# Testing on a Polygon
shell = LinearRing((0, 0), (0, 100), (100, 100), (100, 0), (0, 0))
inner = LinearRing((40, 40), (40, 60), (60, 60), (60, 40), (40, 40))
# Creating a State object using a built Polygon
ply = Polygon(shell, inner)
nullstate = State(name='NullState', poly=ply)
self.assertEqual(4326, nullstate.poly.srid) # SRID auto-set from None
nullstate.save()
ns = State.objects.get(name='NullState')
self.assertEqual(ply, ns.poly)
# Testing the `ogr` and `srs` lazy-geometry properties.
if gdal.HAS_GDAL:
self.assertIsInstance(ns.poly.ogr, gdal.OGRGeometry)
self.assertEqual(ns.poly.wkb, ns.poly.ogr.wkb)
self.assertIsInstance(ns.poly.srs, gdal.SpatialReference)
self.assertEqual('WGS 84', ns.poly.srs.name)
# Changing the interior ring on the poly attribute.
new_inner = LinearRing((30, 30), (30, 70), (70, 70), (70, 30), (30, 30))
ns.poly[1] = new_inner
ply[1] = new_inner
self.assertEqual(4326, ns.poly.srid)
ns.save()
self.assertEqual(ply, State.objects.get(name='NullState').poly)
ns.delete()
@skipUnlessDBFeature("supports_transform")
def test_lookup_insert_transform(self):
"Testing automatic transform for lookups and inserts."
# San Antonio in 'WGS84' (SRID 4326)
sa_4326 = 'POINT (-98.493183 29.424170)'
wgs_pnt = fromstr(sa_4326, srid=4326) # Our reference point in WGS84
# Oracle doesn't have SRID 3084, using 41157.
if oracle:
# San Antonio in 'Texas 4205, Southern Zone (1983, meters)' (SRID 41157)
# Used the following Oracle SQL to get this value:
# SELECT SDO_UTIL.TO_WKTGEOMETRY(
# SDO_CS.TRANSFORM(SDO_GEOMETRY('POINT (-98.493183 29.424170)', 4326), 41157))
# )
# FROM DUAL;
nad_wkt = 'POINT (300662.034646583 5416427.45974934)'
nad_srid = 41157
else:
# San Antonio in 'NAD83(HARN) / Texas Centric Lambert Conformal' (SRID 3084)
# Used ogr.py in gdal 1.4.1 for this transform
nad_wkt = 'POINT (1645978.362408288754523 6276356.025927528738976)'
nad_srid = 3084
# Constructing & querying with a point from a different SRID. Oracle
# `SDO_OVERLAPBDYINTERSECT` operates differently from
# `ST_Intersects`, so contains is used instead.
nad_pnt = fromstr(nad_wkt, srid=nad_srid)
if oracle:
tx = Country.objects.get(mpoly__contains=nad_pnt)
else:
tx = Country.objects.get(mpoly__intersects=nad_pnt)
self.assertEqual('Texas', tx.name)
# Creating San Antonio. Remember the Alamo.
sa = City.objects.create(name='San Antonio', point=nad_pnt)
# Now verifying that San Antonio was transformed correctly
sa = City.objects.get(name='San Antonio')
self.assertAlmostEqual(wgs_pnt.x, sa.point.x, 6)
self.assertAlmostEqual(wgs_pnt.y, sa.point.y, 6)
# If the GeometryField SRID is -1, then we shouldn't perform any
# transformation if the SRID of the input geometry is different.
if spatialite and connection.ops.spatial_version < (3, 0, 0):
# SpatiaLite < 3 does not support missing SRID values.
return
m1 = MinusOneSRID(geom=Point(17, 23, srid=4326))
m1.save()
self.assertEqual(-1, m1.geom.srid)
def test_createnull(self):
"Testing creating a model instance and the geometry being None"
c = City()
self.assertEqual(c.point, None)
def test_geometryfield(self):
"Testing the general GeometryField."
Feature(name='Point', geom=Point(1, 1)).save()
Feature(name='LineString', geom=LineString((0, 0), (1, 1), (5, 5))).save()
Feature(name='Polygon', geom=Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0)))).save()
Feature(name='GeometryCollection',
geom=GeometryCollection(Point(2, 2), LineString((0, 0), (2, 2)),
Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0))))).save()
f_1 = Feature.objects.get(name='Point')
self.assertIsInstance(f_1.geom, Point)
self.assertEqual((1.0, 1.0), f_1.geom.tuple)
f_2 = Feature.objects.get(name='LineString')
self.assertIsInstance(f_2.geom, LineString)
self.assertEqual(((0.0, 0.0), (1.0, 1.0), (5.0, 5.0)), f_2.geom.tuple)
f_3 = Feature.objects.get(name='Polygon')
self.assertIsInstance(f_3.geom, Polygon)
f_4 = Feature.objects.get(name='GeometryCollection')
self.assertIsInstance(f_4.geom, GeometryCollection)
self.assertEqual(f_3.geom, f_4.geom[2])
@skipUnlessDBFeature("supports_transform")
def test_inherited_geofields(self):
"Test GeoQuerySet methods on inherited Geometry fields."
# Creating a Pennsylvanian city.
PennsylvaniaCity.objects.create(name='Mansfield', county='Tioga', point='POINT(-77.071445 41.823881)')
# All transformation SQL will need to be performed on the
# _parent_ table.
qs = PennsylvaniaCity.objects.transform(32128)
self.assertEqual(1, qs.count())
for pc in qs:
self.assertEqual(32128, pc.point.srid)
def test_raw_sql_query(self):
"Testing raw SQL query."
cities1 = City.objects.all()
# Only PostGIS would support a 'select *' query because of its recognized
# HEXEWKB format for geometry fields
as_text = 'ST_AsText(%s)' if postgis else connection.ops.select
cities2 = City.objects.raw(
'select id, name, %s from geoapp_city' % as_text % 'point'
)
self.assertEqual(len(cities1), len(list(cities2)))
self.assertIsInstance(cities2[0].point, Point)
def test_dumpdata_loaddata_cycle(self):
"""
Test a dumpdata/loaddata cycle with geographic data.
"""
out = six.StringIO()
original_data = list(City.objects.all().order_by('name'))
call_command('dumpdata', 'geoapp.City', stdout=out)
result = out.getvalue()
houston = City.objects.get(name='Houston')
self.assertIn('"point": "%s"' % houston.point.ewkt, result)
# Reload now dumped data
with tempfile.NamedTemporaryFile(mode='w', suffix='.json') as tmp:
tmp.write(result)
tmp.seek(0)
call_command('loaddata', tmp.name, verbosity=0)
self.assertListEqual(original_data, list(City.objects.all().order_by('name')))
@skipUnlessDBFeature("gis_enabled")
class GeoLookupTest(TestCase):
fixtures = ['initial']
def test_disjoint_lookup(self):
"Testing the `disjoint` lookup type."
ptown = City.objects.get(name='Pueblo')
qs1 = City.objects.filter(point__disjoint=ptown.point)
self.assertEqual(7, qs1.count())
if connection.features.supports_real_shape_operations:
qs2 = State.objects.filter(poly__disjoint=ptown.point)
self.assertEqual(1, qs2.count())
self.assertEqual('Kansas', qs2[0].name)
def test_contains_contained_lookups(self):
"Testing the 'contained', 'contains', and 'bbcontains' lookup types."
# Getting Texas, yes we were a country -- once ;)
texas = Country.objects.get(name='Texas')
# Seeing what cities are in Texas, should get Houston and Dallas,
# and Oklahoma City because 'contained' only checks on the
# _bounding box_ of the Geometries.
if connection.features.supports_contained_lookup:
qs = City.objects.filter(point__contained=texas.mpoly)
self.assertEqual(3, qs.count())
cities = ['Houston', 'Dallas', 'Oklahoma City']
for c in qs:
self.assertIn(c.name, cities)
# Pulling out some cities.
houston = City.objects.get(name='Houston')
wellington = City.objects.get(name='Wellington')
pueblo = City.objects.get(name='Pueblo')
okcity = City.objects.get(name='Oklahoma City')
lawrence = City.objects.get(name='Lawrence')
# Now testing contains on the countries using the points for
# Houston and Wellington.
tx = Country.objects.get(mpoly__contains=houston.point) # Query w/GEOSGeometry
nz = Country.objects.get(mpoly__contains=wellington.point.hex) # Query w/EWKBHEX
self.assertEqual('Texas', tx.name)
self.assertEqual('New Zealand', nz.name)
# Spatialite 2.3 thinks that Lawrence is in Puerto Rico (a NULL geometry).
if not (spatialite and connection.ops.spatial_version < (3, 0, 0)):
ks = State.objects.get(poly__contains=lawrence.point)
self.assertEqual('Kansas', ks.name)
# Pueblo and Oklahoma City (even though OK City is within the bounding box of Texas)
# are not contained in Texas or New Zealand.
self.assertEqual(len(Country.objects.filter(mpoly__contains=pueblo.point)), 0) # Query w/GEOSGeometry object
self.assertEqual(len(Country.objects.filter(mpoly__contains=okcity.point.wkt)),
0 if connection.features.supports_real_shape_operations else 1) # Query w/WKT
# OK City is contained w/in bounding box of Texas.
if connection.features.supports_bbcontains_lookup:
qs = Country.objects.filter(mpoly__bbcontains=okcity.point)
self.assertEqual(1, len(qs))
self.assertEqual('Texas', qs[0].name)
@skipUnlessDBFeature("supports_crosses_lookup")
def test_crosses_lookup(self):
Track.objects.create(
name='Line1',
line=LineString([(-95, 29), (-60, 0)])
)
self.assertEqual(
Track.objects.filter(line__crosses=LineString([(-95, 0), (-60, 29)])).count(),
1
)
self.assertEqual(
Track.objects.filter(line__crosses=LineString([(-95, 30), (0, 30)])).count(),
0
)
@skipUnlessDBFeature("supports_left_right_lookups")
def test_left_right_lookups(self):
"Testing the 'left' and 'right' lookup types."
# Left: A << B => true if xmax(A) < xmin(B)
# Right: A >> B => true if xmin(A) > xmax(B)
# See: BOX2D_left() and BOX2D_right() in lwgeom_box2dfloat4.c in PostGIS source.
# The left/right lookup tests are known failures on PostGIS 2.0/2.0.1
# http://trac.osgeo.org/postgis/ticket/2035
if postgis_bug_version():
self.skipTest("PostGIS 2.0/2.0.1 left and right lookups are known to be buggy.")
# Getting the borders for Colorado & Kansas
co_border = State.objects.get(name='Colorado').poly
ks_border = State.objects.get(name='Kansas').poly
# Note: Wellington has an 'X' value of 174, so it will not be considered
# to the left of CO.
# These cities should be strictly to the right of the CO border.
cities = ['Houston', 'Dallas', 'Oklahoma City',
'Lawrence', 'Chicago', 'Wellington']
qs = City.objects.filter(point__right=co_border)
self.assertEqual(6, len(qs))
for c in qs:
self.assertIn(c.name, cities)
# These cities should be strictly to the right of the KS border.
cities = ['Chicago', 'Wellington']
qs = City.objects.filter(point__right=ks_border)
self.assertEqual(2, len(qs))
for c in qs:
self.assertIn(c.name, cities)
# Note: Wellington has an 'X' value of 174, so it will not be considered
# to the left of CO.
vic = City.objects.get(point__left=co_border)
self.assertEqual('Victoria', vic.name)
cities = ['Pueblo', 'Victoria']
qs = City.objects.filter(point__left=ks_border)
self.assertEqual(2, len(qs))
for c in qs:
self.assertIn(c.name, cities)
def test_equals_lookups(self):
"Testing the 'same_as' and 'equals' lookup types."
pnt = fromstr('POINT (-95.363151 29.763374)', srid=4326)
c1 = City.objects.get(point=pnt)
c2 = City.objects.get(point__same_as=pnt)
c3 = City.objects.get(point__equals=pnt)
for c in [c1, c2, c3]:
self.assertEqual('Houston', c.name)
@skipUnlessDBFeature("supports_null_geometries")
def test_null_geometries(self):
"Testing NULL geometry support, and the `isnull` lookup type."
# Creating a state with a NULL boundary.
State.objects.create(name='Puerto Rico')
# Querying for both NULL and Non-NULL values.
nullqs = State.objects.filter(poly__isnull=True)
validqs = State.objects.filter(poly__isnull=False)
# Puerto Rico should be NULL (it's a commonwealth unincorporated territory)
self.assertEqual(1, len(nullqs))
self.assertEqual('Puerto Rico', nullqs[0].name)
# The valid states should be Colorado & Kansas
self.assertEqual(2, len(validqs))
state_names = [s.name for s in validqs]
self.assertIn('Colorado', state_names)
self.assertIn('Kansas', state_names)
# Saving another commonwealth w/a NULL geometry.
nmi = State.objects.create(name='Northern Mariana Islands', poly=None)
self.assertEqual(nmi.poly, None)
# Assigning a geometry and saving -- then UPDATE back to NULL.
nmi.poly = 'POLYGON((0 0,1 0,1 1,1 0,0 0))'
nmi.save()
State.objects.filter(name='Northern Mariana Islands').update(poly=None)
self.assertIsNone(State.objects.get(name='Northern Mariana Islands').poly)
@skipUnlessDBFeature("supports_relate_lookup")
def test_relate_lookup(self):
"Testing the 'relate' lookup type."
# To make things more interesting, we will have our Texas reference point in
# different SRIDs.
pnt1 = fromstr('POINT (649287.0363174 4177429.4494686)', srid=2847)
pnt2 = fromstr('POINT(-98.4919715741052 29.4333344025053)', srid=4326)
# Not passing in a geometry as first param should
# raise a type error when initializing the GeoQuerySet
self.assertRaises(ValueError, Country.objects.filter, mpoly__relate=(23, 'foo'))
# Making sure the right exception is raised for the given
# bad arguments.
for bad_args, e in [((pnt1, 0), ValueError), ((pnt2, 'T*T***FF*', 0), ValueError)]:
qs = Country.objects.filter(mpoly__relate=bad_args)
self.assertRaises(e, qs.count)
# Relate works differently for the different backends.
if postgis or spatialite:
contains_mask = 'T*T***FF*'
within_mask = 'T*F**F***'
intersects_mask = 'T********'
elif oracle:
contains_mask = 'contains'
within_mask = 'inside'
# TODO: This is not quite the same as the PostGIS mask above
intersects_mask = 'overlapbdyintersect'
# Testing contains relation mask.
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt1, contains_mask)).name)
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt2, contains_mask)).name)
# Testing within relation mask.
ks = State.objects.get(name='Kansas')
self.assertEqual('Lawrence', City.objects.get(point__relate=(ks.poly, within_mask)).name)
# Testing intersection relation mask.
if not oracle:
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt1, intersects_mask)).name)
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt2, intersects_mask)).name)
self.assertEqual('Lawrence', City.objects.get(point__relate=(ks.poly, intersects_mask)).name)
@skipUnlessDBFeature("gis_enabled")
@ignore_warnings(category=RemovedInDjango21Warning)
class GeoQuerySetTest(TestCase):
fixtures = ['initial']
# Please keep the tests in GeoQuerySet method's alphabetic order
@skipUnlessDBFeature("has_centroid_method")
def test_centroid(self):
"Testing the `centroid` GeoQuerySet method."
qs = State.objects.exclude(poly__isnull=True).centroid()
if oracle:
tol = 0.1
elif spatialite:
tol = 0.000001
else:
tol = 0.000000001
for s in qs:
self.assertTrue(s.poly.centroid.equals_exact(s.centroid, tol))
@skipUnlessDBFeature(
"has_difference_method", "has_intersection_method",
"has_sym_difference_method", "has_union_method")
def test_diff_intersection_union(self):
"Testing the `difference`, `intersection`, `sym_difference`, and `union` GeoQuerySet methods."
geom = Point(5, 23)
qs = Country.objects.all().difference(geom).sym_difference(geom).union(geom)
# XXX For some reason SpatiaLite does something screwy with the Texas geometry here. Also,
# XXX it doesn't like the null intersection.
if spatialite:
qs = qs.exclude(name='Texas')
else:
qs = qs.intersection(geom)
for c in qs:
if oracle:
# Should be able to execute the queries; however, they won't be the same
# as GEOS (because Oracle doesn't use GEOS internally like PostGIS or
# SpatiaLite).
pass
else:
self.assertEqual(c.mpoly.difference(geom), c.difference)
if not spatialite:
self.assertEqual(c.mpoly.intersection(geom), c.intersection)
# Ordering might differ in collections
self.assertSetEqual(set(g.wkt for g in c.mpoly.sym_difference(geom)),
set(g.wkt for g in c.sym_difference))
self.assertSetEqual(set(g.wkt for g in c.mpoly.union(geom)),
set(g.wkt for g in c.union))
@skipUnlessDBFeature("has_envelope_method")
def test_envelope(self):
"Testing the `envelope` GeoQuerySet method."
countries = Country.objects.all().envelope()
for country in countries:
self.assertIsInstance(country.envelope, Polygon)
@skipUnlessDBFeature("supports_extent_aggr")
@ignore_warnings(category=RemovedInDjango20Warning)
def test_extent(self):
"""
Testing the (deprecated) `extent` GeoQuerySet method and the Extent
aggregate.
"""
# Reference query:
# `SELECT ST_extent(point) FROM geoapp_city WHERE (name='Houston' or name='Dallas');`
# => BOX(-96.8016128540039 29.7633724212646,-95.3631439208984 32.7820587158203)
expected = (-96.8016128540039, 29.7633724212646, -95.3631439208984, 32.782058715820)
qs = City.objects.filter(name__in=('Houston', 'Dallas'))
extent1 = qs.extent()
extent2 = qs.aggregate(Extent('point'))['point__extent']
for extent in (extent1, extent2):
for val, exp in zip(extent, expected):
self.assertAlmostEqual(exp, val, 4)
self.assertIsNone(City.objects.filter(name=('Smalltown')).extent())
self.assertIsNone(City.objects.filter(name=('Smalltown')).aggregate(Extent('point'))['point__extent'])
@skipUnlessDBFeature("supports_extent_aggr")
def test_extent_with_limit(self):
"""
Testing if extent supports limit.
"""
extent1 = City.objects.all().aggregate(Extent('point'))['point__extent']
extent2 = City.objects.all()[:3].aggregate(Extent('point'))['point__extent']
self.assertNotEqual(extent1, extent2)
@skipUnlessDBFeature("has_force_rhr_method")
def test_force_rhr(self):
"Testing GeoQuerySet.force_rhr()."
rings = (
((0, 0), (5, 0), (0, 5), (0, 0)),
((1, 1), (1, 3), (3, 1), (1, 1)),
)
rhr_rings = (
((0, 0), (0, 5), (5, 0), (0, 0)),
((1, 1), (3, 1), (1, 3), (1, 1)),
)
State.objects.create(name='Foo', poly=Polygon(*rings))
s = State.objects.force_rhr().get(name='Foo')
self.assertEqual(rhr_rings, s.force_rhr.coords)
@skipUnlessDBFeature("has_geohash_method")
def test_geohash(self):
"Testing GeoQuerySet.geohash()."
# Reference query:
# SELECT ST_GeoHash(point) FROM geoapp_city WHERE name='Houston';
# SELECT ST_GeoHash(point, 5) FROM geoapp_city WHERE name='Houston';
ref_hash = '9vk1mfq8jx0c8e0386z6'
h1 = City.objects.geohash().get(name='Houston')
h2 = City.objects.geohash(precision=5).get(name='Houston')
self.assertEqual(ref_hash, h1.geohash)
self.assertEqual(ref_hash[:5], h2.geohash)
def test_geojson(self):
"Testing GeoJSON output from the database using GeoQuerySet.geojson()."
# Only PostGIS and SpatiaLite 3.0+ support GeoJSON.
if not connection.ops.geojson:
self.assertRaises(NotImplementedError, Country.objects.all().geojson, field_name='mpoly')
return
pueblo_json = '{"type":"Point","coordinates":[-104.609252,38.255001]}'
houston_json = (
'{"type":"Point","crs":{"type":"name","properties":'
'{"name":"EPSG:4326"}},"coordinates":[-95.363151,29.763374]}'
)
victoria_json = (
'{"type":"Point","bbox":[-123.30519600,48.46261100,-123.30519600,48.46261100],'
'"coordinates":[-123.305196,48.462611]}'
)
chicago_json = (
'{"type":"Point","crs":{"type":"name","properties":{"name":"EPSG:4326"}},'
'"bbox":[-87.65018,41.85039,-87.65018,41.85039],"coordinates":[-87.65018,41.85039]}'
)
if spatialite:
victoria_json = (
'{"type":"Point","bbox":[-123.305196,48.462611,-123.305196,48.462611],'
'"coordinates":[-123.305196,48.462611]}'
)
# Precision argument should only be an integer
self.assertRaises(TypeError, City.objects.geojson, precision='foo')
# Reference queries and values.
# SELECT ST_AsGeoJson("geoapp_city"."point", 8, 0)
# FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Pueblo';
self.assertEqual(pueblo_json, City.objects.geojson().get(name='Pueblo').geojson)
# SELECT ST_AsGeoJson("geoapp_city"."point", 8, 2) FROM "geoapp_city"
# WHERE "geoapp_city"."name" = 'Houston';
# This time we want to include the CRS by using the `crs` keyword.
self.assertEqual(houston_json, City.objects.geojson(crs=True, model_att='json').get(name='Houston').json)
# SELECT ST_AsGeoJson("geoapp_city"."point", 8, 1) FROM "geoapp_city"
# WHERE "geoapp_city"."name" = 'Houston';
# This time we include the bounding box by using the `bbox` keyword.
self.assertEqual(victoria_json, City.objects.geojson(bbox=True).get(name='Victoria').geojson)
# SELECT ST_AsGeoJson("geoapp_city"."point", 5, 3) FROM "geoapp_city"
# WHERE "geoapp_city"."name" = 'Chicago';
# Finally, we set every available keyword.
self.assertEqual(
chicago_json,
City.objects.geojson(bbox=True, crs=True, precision=5).get(name='Chicago').geojson
)
@skipUnlessDBFeature("has_gml_method")
def test_gml(self):
"Testing GML output from the database using GeoQuerySet.gml()."
# Should throw a TypeError when trying to obtain GML from a
# non-geometry field.
qs = City.objects.all()
self.assertRaises(TypeError, qs.gml, field_name='name')
ptown1 = City.objects.gml(field_name='point', precision=9).get(name='Pueblo')
ptown2 = City.objects.gml(precision=9).get(name='Pueblo')
if oracle:
# No precision parameter for Oracle :-/
gml_regex = re.compile(
r'^<gml:Point srsName="SDO:4326" xmlns:gml="http://www.opengis.net/gml">'
r'<gml:coordinates decimal="\." cs="," ts=" ">-104.60925\d+,38.25500\d+ '
r'</gml:coordinates></gml:Point>'
)
elif spatialite and connection.ops.spatial_version < (3, 0, 0):
# Spatialite before 3.0 has extra colon in SrsName
gml_regex = re.compile(
r'^<gml:Point SrsName="EPSG::4326"><gml:coordinates decimal="\." '
r'cs="," ts=" ">-104.609251\d+,38.255001</gml:coordinates></gml:Point>'
)
else:
gml_regex = re.compile(
r'^<gml:Point srsName="EPSG:4326"><gml:coordinates>'
r'-104\.60925\d+,38\.255001</gml:coordinates></gml:Point>'
)
for ptown in [ptown1, ptown2]:
self.assertTrue(gml_regex.match(ptown.gml))
if postgis:
self.assertIn('<gml:pos srsDimension="2">', City.objects.gml(version=3).get(name='Pueblo').gml)
@skipUnlessDBFeature("has_kml_method")
def test_kml(self):
"Testing KML output from the database using GeoQuerySet.kml()."
# Should throw a TypeError when trying to obtain KML from a
# non-geometry field.
qs = City.objects.all()
self.assertRaises(TypeError, qs.kml, 'name')
# Ensuring the KML is as expected.
ptown1 = City.objects.kml(field_name='point', precision=9).get(name='Pueblo')
ptown2 = City.objects.kml(precision=9).get(name='Pueblo')
for ptown in [ptown1, ptown2]:
self.assertEqual('<Point><coordinates>-104.609252,38.255001</coordinates></Point>', ptown.kml)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_make_line(self):
"""
Testing the (deprecated) `make_line` GeoQuerySet method and the MakeLine
aggregate.
"""
if not connection.features.supports_make_line_aggr:
# Only PostGIS has support for the MakeLine aggregate. For other
# backends, test that NotImplementedError is raised
self.assertRaises(
NotImplementedError,
City.objects.all().aggregate, MakeLine('point')
)
return
# Ensuring that a `TypeError` is raised on models without PointFields.
self.assertRaises(TypeError, State.objects.make_line)
self.assertRaises(TypeError, Country.objects.make_line)
# MakeLine on an inappropriate field returns simply None
self.assertIsNone(State.objects.aggregate(MakeLine('poly'))['poly__makeline'])
# Reference query:
# SELECT AsText(ST_MakeLine(geoapp_city.point)) FROM geoapp_city;
ref_line = GEOSGeometry(
'LINESTRING(-95.363151 29.763374,-96.801611 32.782057,'
'-97.521157 34.464642,174.783117 -41.315268,-104.609252 38.255001,'
'-95.23506 38.971823,-87.650175 41.850385,-123.305196 48.462611)',
srid=4326
)
# We check for equality with a tolerance of 10e-5 which is a lower bound
# of the precisions of ref_line coordinates
line1 = City.objects.make_line()
line2 = City.objects.aggregate(MakeLine('point'))['point__makeline']
for line in (line1, line2):
self.assertTrue(ref_line.equals_exact(line, tolerance=10e-5),
"%s != %s" % (ref_line, line))
@skipUnlessDBFeature("has_num_geom_method")
def test_num_geom(self):
"Testing the `num_geom` GeoQuerySet method."
# Both 'countries' only have two geometries.
for c in Country.objects.num_geom():
self.assertEqual(2, c.num_geom)
for c in City.objects.filter(point__isnull=False).num_geom():
# Oracle and PostGIS 2.0+ will return 1 for the number of
# geometries on non-collections.
self.assertEqual(1, c.num_geom)
@skipUnlessDBFeature("supports_num_points_poly")
def test_num_points(self):
"Testing the `num_points` GeoQuerySet method."
for c in Country.objects.num_points():
self.assertEqual(c.mpoly.num_points, c.num_points)
if not oracle:
# Oracle cannot count vertices in Point geometries.
for c in City.objects.num_points():
self.assertEqual(1, c.num_points)
@skipUnlessDBFeature("has_point_on_surface_method")
def test_point_on_surface(self):
"Testing the `point_on_surface` GeoQuerySet method."
# Reference values.
if oracle:
# SELECT SDO_UTIL.TO_WKTGEOMETRY(SDO_GEOM.SDO_POINTONSURFACE(GEOAPP_COUNTRY.MPOLY, 0.05))
# FROM GEOAPP_COUNTRY;
ref = {'New Zealand': fromstr('POINT (174.616364 -36.100861)', srid=4326),
'Texas': fromstr('POINT (-103.002434 36.500397)', srid=4326),
}
else:
# Using GEOSGeometry to compute the reference point on surface values
# -- since PostGIS also uses GEOS these should be the same.
ref = {'New Zealand': Country.objects.get(name='New Zealand').mpoly.point_on_surface,
'Texas': Country.objects.get(name='Texas').mpoly.point_on_surface
}
for c in Country.objects.point_on_surface():
if spatialite:
# XXX This seems to be a WKT-translation-related precision issue?
tol = 0.00001
else:
tol = 0.000000001
self.assertTrue(ref[c.name].equals_exact(c.point_on_surface, tol))
@skipUnlessDBFeature("has_reverse_method")
def test_reverse_geom(self):
"Testing GeoQuerySet.reverse_geom()."
coords = [(-95.363151, 29.763374), (-95.448601, 29.713803)]
Track.objects.create(name='Foo', line=LineString(coords))
t = Track.objects.reverse_geom().get(name='Foo')
coords.reverse()
self.assertEqual(tuple(coords), t.reverse_geom.coords)
if oracle:
self.assertRaises(TypeError, State.objects.reverse_geom)
@skipUnlessDBFeature("has_scale_method")
def test_scale(self):
"Testing the `scale` GeoQuerySet method."
xfac, yfac = 2, 3
tol = 5 # XXX The low precision tolerance is for SpatiaLite
qs = Country.objects.scale(xfac, yfac, model_att='scaled')
for c in qs:
for p1, p2 in zip(c.mpoly, c.scaled):
for r1, r2 in zip(p1, p2):
for c1, c2 in zip(r1.coords, r2.coords):
self.assertAlmostEqual(c1[0] * xfac, c2[0], tol)
self.assertAlmostEqual(c1[1] * yfac, c2[1], tol)
@skipUnlessDBFeature("has_snap_to_grid_method")
def test_snap_to_grid(self):
"Testing GeoQuerySet.snap_to_grid()."
# Let's try and break snap_to_grid() with bad combinations of arguments.
for bad_args in ((), range(3), range(5)):
self.assertRaises(ValueError, Country.objects.snap_to_grid, *bad_args)
for bad_args in (('1.0',), (1.0, None), tuple(map(six.text_type, range(4)))):
self.assertRaises(TypeError, Country.objects.snap_to_grid, *bad_args)
# Boundary for San Marino, courtesy of Bjorn Sandvik of thematicmapping.org
# from the world borders dataset he provides.
wkt = ('MULTIPOLYGON(((12.41580 43.95795,12.45055 43.97972,12.45389 43.98167,'
'12.46250 43.98472,12.47167 43.98694,12.49278 43.98917,'
'12.50555 43.98861,12.51000 43.98694,12.51028 43.98277,'
'12.51167 43.94333,12.51056 43.93916,12.49639 43.92333,'
'12.49500 43.91472,12.48778 43.90583,12.47444 43.89722,'
'12.46472 43.89555,12.45917 43.89611,12.41639 43.90472,'
'12.41222 43.90610,12.40782 43.91366,12.40389 43.92667,'
'12.40500 43.94833,12.40889 43.95499,12.41580 43.95795)))')
Country.objects.create(name='San Marino', mpoly=fromstr(wkt))
# Because floating-point arithmetic isn't exact, we set a tolerance
# to pass into GEOS `equals_exact`.
tol = 0.000000001
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.1)) FROM "geoapp_country"
# WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr('MULTIPOLYGON(((12.4 44,12.5 44,12.5 43.9,12.4 43.9,12.4 44)))')
self.assertTrue(ref.equals_exact(Country.objects.snap_to_grid(0.1).get(name='San Marino').snap_to_grid, tol))
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.05, 0.23)) FROM "geoapp_country"
# WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr('MULTIPOLYGON(((12.4 43.93,12.45 43.93,12.5 43.93,12.45 43.93,12.4 43.93)))')
self.assertTrue(
ref.equals_exact(Country.objects.snap_to_grid(0.05, 0.23).get(name='San Marino').snap_to_grid, tol)
)
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.5, 0.17, 0.05, 0.23)) FROM "geoapp_country"
# WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr(
'MULTIPOLYGON(((12.4 43.87,12.45 43.87,12.45 44.1,12.5 44.1,12.5 43.87,12.45 43.87,12.4 43.87)))'
)
self.assertTrue(
ref.equals_exact(
Country.objects.snap_to_grid(0.05, 0.23, 0.5, 0.17).get(name='San Marino').snap_to_grid,
tol
)
)
@skipUnlessDBFeature("has_svg_method")
def test_svg(self):
"Testing SVG output using GeoQuerySet.svg()."
self.assertRaises(TypeError, City.objects.svg, precision='foo')
# SELECT AsSVG(geoapp_city.point, 0, 8) FROM geoapp_city WHERE name = 'Pueblo';
svg1 = 'cx="-104.609252" cy="-38.255001"'
# Even though relative, only one point so it's practically the same except for
# the 'c' letter prefix on the x,y values.
svg2 = svg1.replace('c', '')
self.assertEqual(svg1, City.objects.svg().get(name='Pueblo').svg)
self.assertEqual(svg2, City.objects.svg(relative=5).get(name='Pueblo').svg)
@skipUnlessDBFeature("has_transform_method")
def test_transform(self):
"Testing the transform() GeoQuerySet method."
# Pre-transformed points for Houston and Pueblo.
htown = fromstr('POINT(1947516.83115183 6322297.06040572)', srid=3084)
ptown = fromstr('POINT(992363.390841912 481455.395105533)', srid=2774)
prec = 3 # Precision is low due to version variations in PROJ and GDAL.
# Asserting the result of the transform operation with the values in
# the pre-transformed points. Oracle does not have the 3084 SRID.
if not oracle:
h = City.objects.transform(htown.srid).get(name='Houston')
self.assertEqual(3084, h.point.srid)
self.assertAlmostEqual(htown.x, h.point.x, prec)
self.assertAlmostEqual(htown.y, h.point.y, prec)
p1 = City.objects.transform(ptown.srid, field_name='point').get(name='Pueblo')
p2 = City.objects.transform(srid=ptown.srid).get(name='Pueblo')
for p in [p1, p2]:
self.assertEqual(2774, p.point.srid)
self.assertAlmostEqual(ptown.x, p.point.x, prec)
self.assertAlmostEqual(ptown.y, p.point.y, prec)
@skipUnlessDBFeature("has_translate_method")
def test_translate(self):
"Testing the `translate` GeoQuerySet method."
xfac, yfac = 5, -23
qs = Country.objects.translate(xfac, yfac, model_att='translated')
for c in qs:
for p1, p2 in zip(c.mpoly, c.translated):
for r1, r2 in zip(p1, p2):
for c1, c2 in zip(r1.coords, r2.coords):
# XXX The low precision is for SpatiaLite
self.assertAlmostEqual(c1[0] + xfac, c2[0], 5)
self.assertAlmostEqual(c1[1] + yfac, c2[1], 5)
# TODO: Oracle can be made to pass if
# union1 = union2 = fromstr('POINT (-97.5211570000000023 34.4646419999999978)')
# but this seems unexpected and should be investigated to determine the cause.
@skipUnlessDBFeature("has_unionagg_method")
@no_oracle
@ignore_warnings(category=RemovedInDjango20Warning)
def test_unionagg(self):
"""
Testing the (deprecated) `unionagg` (aggregate union) GeoQuerySet method
and the Union aggregate.
"""
tx = Country.objects.get(name='Texas').mpoly
# Houston, Dallas -- Ordering may differ depending on backend or GEOS version.
union1 = fromstr('MULTIPOINT(-96.801611 32.782057,-95.363151 29.763374)')
union2 = fromstr('MULTIPOINT(-95.363151 29.763374,-96.801611 32.782057)')
qs = City.objects.filter(point__within=tx)
self.assertRaises(TypeError, qs.unionagg, 'name')
self.assertRaises(ValueError, qs.aggregate, Union('name'))
# Using `field_name` keyword argument in one query and specifying an
# order in the other (which should not be used because this is
# an aggregate method on a spatial column)
u1 = qs.unionagg(field_name='point')
u2 = qs.order_by('name').unionagg()
u3 = qs.aggregate(Union('point'))['point__union']
u4 = qs.order_by('name').aggregate(Union('point'))['point__union']
tol = 0.00001
self.assertTrue(union1.equals_exact(u1, tol) or union2.equals_exact(u1, tol))
self.assertTrue(union1.equals_exact(u2, tol) or union2.equals_exact(u2, tol))
self.assertTrue(union1.equals_exact(u3, tol) or union2.equals_exact(u3, tol))
self.assertTrue(union1.equals_exact(u4, tol) or union2.equals_exact(u4, tol))
qs = City.objects.filter(name='NotACity')
self.assertIsNone(qs.unionagg(field_name='point'))
self.assertIsNone(qs.aggregate(Union('point'))['point__union'])
def test_within_subquery(self):
"""
Test that using a queryset inside a geo lookup is working (using a subquery)
(#14483).
"""
tex_cities = City.objects.filter(
point__within=Country.objects.filter(name='Texas').values('mpoly')).order_by('name')
expected = ['Dallas', 'Houston']
if not connection.features.supports_real_shape_operations:
expected.append('Oklahoma City')
self.assertEqual(
list(tex_cities.values_list('name', flat=True)),
expected
)
def test_non_concrete_field(self):
NonConcreteModel.objects.create(point=Point(0, 0), name='name')
list(NonConcreteModel.objects.all())
|
|
__author__ = 'thor'
import pymongo as mg
from pymongo import MongoClient
from pymongo.errors import CursorNotFound
import os
import re
import pandas as pd
from numpy import inf, random, int64, int32, ndarray, float64, float32
import subprocess
from datetime import datetime
from pymongo.collection import Collection
from pymongo.errors import InvalidOperation
from ut.daf.to import dict_list_of_rows
from ut.daf.manip import rm_cols_if_present
from ut.daf.manip import rollout_cols
from ut.serialize.s3 import S3
from ut.pfile.to import ungzip
from ut.util.log import printProgress
import numpy as np
from ut.pdict.manip import recursively_update_with
from ut.dpat.buffer import ThreshBuffer
s3_backup_bucket_name = 'mongo-db-bak'
def autorefresh_cursor_iterator(cursor):
def refresh_when_cursor_not_found(cursor):
while True:
try:
yield next(cursor)
except CursorNotFound:
cursor = restart_find_cursor(cursor)
yield next(cursor)
return refresh_when_cursor_not_found(cursor)
def restart_find_cursor(cursor, docs_retrieved_so_far=None):
if docs_retrieved_so_far is None:
docs_retrieved_so_far = cursor._Cursor__retrieved
kwargs = dict(spec=cursor._Cursor__query_spec(),
fields=cursor._Cursor__fields,
skip=cursor._Cursor__skip + docs_retrieved_so_far)
if cursor._Cursor__limit:
new_limit = max(0, cursor._Cursor__limit - docs_retrieved_so_far)
if new_limit > 0:
kwargs = dict(kwargs, limit=new_limit)
else: # in this particular case, we should return an empty cursor. This is my hack for that.
new_cursor = cursor._Cursor__collection.find(**kwargs).limit(1)
next(new_cursor)
return new_cursor
return cursor._Cursor__collection.find(**kwargs)
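# Minimal usage sketch (illustrative; 'my_db'/'my_collection' are placeholder names): wrap a
# find() cursor so iteration transparently resumes at the right offset after a CursorNotFound.
def _example_autorefresh_iteration():
    mgc = MongoClient()['my_db']['my_collection']
    for doc in autorefresh_cursor_iterator(mgc.find({})):
        pass  # process doc; if the server drops the cursor mid-loop, it is recreated and iteration continues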
class BulkMgUpdates(object):
"""
A class to accumulate update instructions and flush them (to perform a bulk mongo update).
See also BulkUpdateBuffer.
When constructed, one must specify the collection object mgc that the data should be stored in.
The push method pushes a (spec, document) tuple to the bulk_op list.
The flush method actually does the bulk write of the items pushed so far.
These methods can be used "manually" to get data written, but a common use of BulkMgUpdates is to use the feed
method, which takes an iterator of items, pushes the corresponding (spec, document) tuples, flushes at regular
intervals and, when the iterator is completely consumed, does a final flush.
The regularity of the flushes during a feed call is controlled by the flush_every attribute.
How to get (spec, document) tuples from the items the iterator yields is specified by the get_spec_and_doc
attribute: a callable taking an item the iterator yields and returning the corresponding (spec, document) tuple.
"""
def __init__(self, mgc, flush_every=500, get_spec_and_doc=None):
"""
:param mgc: mongo collection (pymongo.collection.Collection object)
:param flush_every: How often should data be flushed to the db when calling the feed method
:param get_spec_and_doc: The function to be applied to the elements of the iterator in the feed method to get
(spec, document) tuples from iterator elements. If not specified, will assume the iterator is feeding
(spec, document) tuples.
"""
self.mgc = mgc
self.updater = None
self.initialize()
self.flush_every = flush_every
if get_spec_and_doc is None:
get_spec_and_doc = lambda x: x
self.get_spec_and_doc = get_spec_and_doc
def initialize(self):
self.updater = self.mgc.initialize_unordered_bulk_op()
def push(self, spec, document):
return self.updater.find(spec).upsert().update(document)
def flush(self):
r = self.updater.execute()
self.initialize()
return r
def feed(self, it):
self.initialize()
for i, x in enumerate(it, 1):
spec, document = self.get_spec_and_doc(x)
self.push(spec, document)
if i % self.flush_every == 0:
self.flush()
self.flush()
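# Minimal usage sketch (illustrative; the collection name and the documents fed in are placeholders):
# feed() turns each item into a (spec, document) pair, upserts them in bulk every flush_every items,
# and does a final flush when the iterator is exhausted.
def _example_bulk_mg_updates():
    mgc = MongoClient()['my_db']['my_collection']
    updates = BulkMgUpdates(
        mgc,
        flush_every=1000,
        get_spec_and_doc=lambda d: ({'_id': d['_id']}, {'$set': {'value': d['value']}}),
    )
    updates.feed([{'_id': 1, 'value': 'a'}, {'_id': 2, 'value': 'b'}])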
class BulkUpdateBuffer(ThreshBuffer):
def __init__(self, mgc, max_buf_size=500, upsert=False):
"""
Accumulate and execute bulk update operations.
An object providing the convenience of just "push and forget" for bulk operations.
By push, we mean "just add an update operation".
By forget, we mean "don't have to remember to execute the bulk operation", since the object will do so
automatically as soon as the max_buf_size threshold is reached.
Note: That said, you can't completely forget, since you should call self.flush() at the end
of an iteration of push instructions to make sure the remainder of the operations (accumulated, but
below the threshold) is written to the target collection.
:param mgc: The mongo collection (a pymongo.collection.Collection object) to update
:param max_buf_size: The number of operations after which to do a bulk update.
:param upsert: Whether to use update with upsert or not (default False)
>>> from pymongo import MongoClient
>>> from numpy.random import randint
>>>
>>> i = 0
>>> def rand_doc():
... global i
... i += 1
... return {'a': i, 'b': list(randint(0, 9, 3))}
...
>>> # get an empty test collection
>>> mgc = MongoClient()['test']['test']
>>> _ = mgc.remove({})
>>> print(len(list(mgc.find())))
0
>>> # Use BulkUpdateBuffer to bulk insert 7 docs (with flush when buffer is size 3)
>>> bub = BulkUpdateBuffer(mgc, max_buf_size=3, upsert=True)
>>> for i in range(7):
... doc = rand_doc()
... _ = bub.push({'spec': {'a': doc['a']},
... 'document': {'$set': {'b': doc['b']}}});
>>> # Note that 6 out of 7 docs are in the collection
>>> print(len(list(mgc.find())))
6
>>> # This is why, when one is done with consuming the doc iterator, one should always flush the operations.
>>> _ = bub.flush();
>>> print(len(list(mgc.find())))
7
>>> # showing how, when using iterate, all docs are flushed
>>> it = ({'spec': {'a': doc['a']},
... 'document': {'$set': {'b': doc['b']}}}
... for doc in (rand_doc() for i in range(4)))
>>> _ = bub.iterate(it)
>>> print(len(list(mgc.find())))
11
"""
self.mgc = mgc
super(BulkUpdateBuffer, self).__init__(thresh=max_buf_size)
self.initialize()
self.upsert = upsert
def initialize(self):
super(BulkUpdateBuffer, self).initialize()
self._buf_size = 0
self._buf = self.mgc.initialize_unordered_bulk_op()
def buf_val_for_thresh(self):
return self._buf_size
def _push(self, item):
if isinstance(item, dict):
if self.upsert:
self._buf.find(item.get('spec')).upsert().update(item.get('document'))
else:
self._buf.find(item.get('spec')).update(item.get('document'))
self._buf_size += 1
else:
for _item in item:
self._push(_item)
def _flush(self):
try:
return self._buf.execute()
except InvalidOperation as e:
if str(e) != 'No operations to execute':
raise e
def push(self, item):
"""
Push an operation to the buffer
:param item: A bulk operation. A dict containing a "spec" and a "document" field.
:return: If the push triggered a flush, returns whatever flush() returns; otherwise returns None.
"""
return super(BulkUpdateBuffer, self).push(item)
def flush(self):
"""
Call bulk_mgc.execute() to write (and flush) all operations that have been added.
Also reinitialize the buffer size to 0 and reinitialize the unordered bulk op.
:return: Whatever bulk_mgc.execute() returns
"""
return super(BulkUpdateBuffer, self).flush()
class KeyedBulkUpdateBuffer(BulkUpdateBuffer):
def __init__(self, key_fields, mgc, max_buf_size=500, upsert=False, assert_all_keys=True):
"""
Accumulate and execute bulk update operations with specified key_fields.
This uses the parent class BulkUpdateBuffer, where items are no longer explicit {'spec': SPEC, 'document': DOC}
specifications, but "flat docs" from which the key_fields are extracted to form the SPEC dict.
See BulkUpdateBuffer for more information.
:param key_fields: A list/tuple/array/set of fields to use for the 'spec' of an update operation
:param mgc: The mongo collection (a pymongo.collection.Collection object) to update
:param max_buf_size: The number of operations after which to do a bulk update.
:param upsert: Whether to use update with upsert or not (default False)
:param assert_all_keys: Whether to assert that all "spec" keys are present
>>> from pymongo import MongoClient
>>> from numpy.random import randint
>>>
>>> i = 0
>>> def rand_doc():
... global i
... i += 1
... return {'a': i, 'b': list(randint(0, 9, 3))}
...
>>> # get an empty test collection
>>> mgc = MongoClient()['test']['test']
>>> _ = mgc.remove({})
>>> print(len(list(mgc.find())))
0
>>> # Use KeyedBulkUpdateBuffer to bulk insert 7 docs (with flush when buffer is size 3)
>>> bub = KeyedBulkUpdateBuffer(['a'], mgc, max_buf_size=3, upsert=True)
>>> for i in range(7):
... _ = bub.push(rand_doc())
>>> # Note that 6 out of 7 docs are in the collection
>>> print(len(list(mgc.find())))
6
>>> # This is why, when one is done with consuming the doc iterator, one should always flush the operations.
>>> _ = bub.flush();
>>> print(len(list(mgc.find())))
7
>>> # showing how, when using iterate, all docs are flushed
>>> it = (rand_doc() for i in range(4))
>>> _ = bub.iterate(it)
>>> print(len(list(mgc.find())))
11
"""
super(KeyedBulkUpdateBuffer, self).__init__(mgc=mgc, max_buf_size=max_buf_size, upsert=upsert)
self.key_fields = set(key_fields)
self.assert_all_keys = assert_all_keys
def _push(self, item):
_item = {'spec': {}, 'document': {"$set": {}}}
for k, v in item.items():
if k in self.key_fields:
_item['spec'][k] = v
else:
_item['document']['$set'][k] = v
if self.assert_all_keys:
assert self.key_fields == set(_item['spec']), \
"Some update keys are missing. All items should have fields: {}".format(self.key_fields)
super(KeyedBulkUpdateBuffer, self)._push(_item)
def bulk_update_collection(mgc, operations, verbose=0):
"""
Has the effect of doing:
for op in operations:
mgc.update(spec=op['spec'], document=op['document'])
but uses bulk operations to do it faster.
:param mgc: the mongo collection to update
:param operations: a list of {spec: spec, document: document} dicts defining the updates.
:param verbose: whether to print info before and after the bulk update
:return: None
"""
bulk_mgc = mgc.initialize_unordered_bulk_op()
for operation in operations:
bulk_mgc.find(operation.get('spec')).update(operation.get('document'))
if verbose > 0:
print(('Starting bulk update {}'.format(datetime.now())))
result = bulk_mgc.execute()
if verbose > 0:
print(('Stopping bulk update {}'.format(datetime.now())))
print(('Update result {}'.format(result)))
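# Minimal usage sketch (illustrative; collection and field names are placeholders): each dict in
# `operations` mirrors the spec/document arguments of one mgc.update() call, executed as one bulk op.
def _example_bulk_update_collection():
    mgc = MongoClient()['my_db']['my_collection']
    operations = [
        {'spec': {'_id': 1}, 'document': {'$set': {'status': 'done'}}},
        {'spec': {'_id': 2}, 'document': {'$inc': {'count': 1}}},
    ]
    bulk_update_collection(mgc, operations, verbose=1)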
def bulk_insert_collection(mgc, docs):
"""
Has the effect of doing:
for doc in docs:
mgc.insert(doc)
but uses bulk operations to do it faster.
:param mgc: the mongo collection to update
:param docs: a list of docs to insert
:return: whatever bulk_mgc.execute() returns
"""
bulk_mgc = mgc.initialize_unordered_bulk_op()
for doc in docs:
bulk_mgc.insert(doc)
return bulk_mgc.execute()
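# Illustrative counterpart for inserts (placeholder collection and documents):
def _example_bulk_insert_collection():
    mgc = MongoClient()['my_db']['my_collection']
    return bulk_insert_collection(mgc, [{'a': 1}, {'a': 2}, {'a': 3}])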
def copy_missing_indices_from(source_mgc, target_mgc):
source_index = source_mgc.index_information()
target_index = target_mgc.index_information()
for k, v in source_index.items():
if k not in target_index:
cumul = []
for field, val in v['key']:
if isinstance(val, float):
cumul.append((field, int(val)))
else:
cumul.append((field, val))
target_mgc.create_index(cumul)
def missing_indices(mgc, required_indices_keys):
if isinstance(required_indices_keys, dict): # assume it's the direct output of a collection.index_information()
# get the keys of required_indices_keys
required_indices_keys = keys_of_index_information(required_indices_keys)
mgc_index_info = set(map(tuple, keys_of_index_information(mgc.index_information())))
missing_keys = list()
for k in required_indices_keys:
if tuple(k) not in mgc_index_info:
missing_keys.append(k)
return missing_keys
def keys_of_index_information(index_information):
return [x['key'] for x in list(index_information.values())]
def mg_collection_string(mgc):
return mgc.database.name + '/' + mgc.name
def imap_with_error_handling(apply_fun, error_fun, except_errors=(Exception,), iterator=None):
"""
imap_with_error_handling(
"""
assert iterator is not None, "You're iterator was None!"
for i, x in enumerate(iterator):
try:
            yield apply_fun(x)
        except except_errors as error:
error_fun(x=x, error=error, i=i)
def convert_dict_for_mongo(d):
n = {}
for k, v in list(d.items()):
if isinstance(v, dict):
n[k] = convert_dict_for_mongo(v)
else:
if isinstance(k, str):
for i in ['utf-8', 'iso-8859-1']:
try:
                        k = k.encode(i)
                        break
except (UnicodeEncodeError, UnicodeDecodeError):
continue
if isinstance(v, (int64, int32)):
v = int(v)
elif isinstance(v, (float64, float32)):
v = float(v)
elif isinstance(v, (ndarray, np.matrixlib.defmatrix.matrix)):
if v.dtype == int32 or v.dtype == int64:
v = v.astype(int).tolist()
elif v.dtype == float32 or v.dtype == float64:
v = v.astype(float).tolist()
else:
v = v.tolist()
elif isinstance(v, str):
for i in ['utf-8', 'iso-8859-1']:
try:
                        v = v.encode(i)
                        break
except (UnicodeEncodeError, UnicodeDecodeError):
continue
elif hasattr(v, 'isoformat'):
v = v.isoformat()
n[k] = v
return n
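# A quick sketch of convert_dict_for_mongo on a nested dict holding numpy scalars and arrays,
# assuming numpy is importable; the field names are made up for illustration.
def _example_convert_dict_for_mongo():
    import numpy as np
    doc = {
        'count': np.int64(3),
        'score': np.float32(0.5),
        'vector': np.array([1, 2, 3], dtype=np.int32),
        'nested': {'ratio': np.float64(0.25)},
    }
    converted = convert_dict_for_mongo(doc)
    # numpy scalars become plain ints/floats and arrays become lists, so pymongo can encode them
    return converted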
def iterate_cursor_and_recreate_if_cursor_not_found(cursor_creator, doc_process, start_i=0,
print_progress_fun=None, on_error=None):
"""
Iterates cursor, calling doc_process at every step, and recreates the cursor and restarts the loop if
there's a CursorNotFound error.
cursor_creator(skip) is a function that returns a cursor that skips skip docs.
Initially, the cursor is created calling cursor_creator(skip=0), and the loop keeps track of the number of docs
it processed. If a CursorNotFound error is raised at step i, the cursor is recreated calling
cursor_creator(skip=i), and the loop is restarted.
This is useful for situations where the cursor may "timeout" during voluminous processes.
print_progress_fun(i, doc) (optional) is a function that will be called at every iteration,
BEFORE doc_process is called, usually to print or log something about the progress.
"""
while True:
it = cursor_creator(skip=start_i)
try:
for i, doc in enumerate(it, start_i):
if print_progress_fun is not None:
try:
print_progress_fun(i, doc)
except (CursorNotFound, KeyboardInterrupt, StopIteration) as e:
raise e
# except Exception as e:
# if on_error is not None:
# on_error(doc=doc, error=e, i=i)
# else:
# raise e
try:
doc_process(doc)
except Exception as e:
if on_error is not None:
on_error(obj=doc, error=e, i=i)
else:
raise e
break
except CursorNotFound:
start_i = i
except KeyboardInterrupt:
break
except StopIteration:
break
def get_db_and_collection_and_create_if_doesnt_exist(db, collection, mongo_client=None):
if mongo_client is None:
mongo_client = MongoClient()
try:
mongo_client.create_database(db)
except Exception as e:
print(e)
try:
mongo_client[db].create_collection(collection)
except Exception as e:
print(e)
return mongo_client[db][collection]
def random_selection_iter_from_cursor(cursor, n_selections):
total = cursor.count()
if n_selections >= total:
return cursor
else:
choice_idx = iter(sorted(random.choice(total, n_selections, replace=False)))
def selection_iterator():
current_choice_idx = next(choice_idx)
for i, doc in enumerate(cursor):
if i == current_choice_idx:
yield doc
current_choice_idx = next(choice_idx)
return selection_iterator()
def get_random_doc(collection, *args, **kwargs):
c = collection.find(*args, **kwargs)
count = c.count()
if count == 0:
raise RuntimeError("No documents with specified conditions were found!")
else:
return next(c.limit(-1).skip(random.randint(0, count)))
def mk_find_in_field_logical_query(field, query):
"""
Allows one to create "find in field" query of any logical complexity (since AND, OR, and NOT are supported).
field is the field to consider
query can be a string, a tuple, or a list, and can be nested:
if query is a string, it is is considered to be "must be equal to this"
if the string starts with "-", it is considered to be "must NOT equal to this"
if query is a tuple, take the conjunction (AND) of the tuple's elements
if query is a list, take the disjunction (OR) of the list's elements
"""
if isinstance(query, str):
if query[0] == '-':
return {field: {'$not': {'$eq': query[1:]}}}
else:
return {field: query}
elif isinstance(query, tuple):
return {'$and': [mk_find_in_field_logical_query(field, q) for q in query]}
elif isinstance(query, list):
return {'$or': [mk_find_in_field_logical_query(field, q) for q in query]}
else:
raise TypeError("query must be a string, tuple, or list")
def copy_collection_from_remote_to_local(remote_client, remote_db, remote_collection,
local_db=None, local_collection=None,
max_docs_per_collection=inf, verbose=False):
local_db = local_db or remote_db
local_collection = local_collection or remote_collection
remote_collection_connection = remote_client[remote_db][remote_collection]
local_db_connection = mg.MongoClient()[local_db]
if local_collection in local_db_connection.collection_names():
print(("Local collection '{}' existed and is being deleted".format(local_collection)))
try:
local_db_connection[local_collection].drop()
except mg.errors.OperationFailure as e:
print((" !!! Nope, can't delete that: {}".format(e.message)))
local_collection_connection = local_db_connection[local_collection]
for i, d in enumerate(remote_collection_connection.find()):
if i < max_docs_per_collection:
if verbose:
printProgress("item {}".format(i))
local_collection_connection.insert(d)
else:
break
def get_dict_with_key_from_collection(key, collection):
try:
return collection.find_one({key: {'$exists': True}}).get(key)
except AttributeError:
return None
def insert_df(df, collection, delete_previous_contents=False, **kwargs):
"""
insert the rows of the dataframe df (as dicts) in the given collection.
    If you want to do it given a mongo_db and a collection_name:
        insert_df(df, getattr(mongo_db, collection_name), **kwargs)
    If you want to do it given (a client, and...) a db name and collection name:
        insert_df(df, getattr(getattr(client, db_name), collection_name), **kwargs)
"""
if delete_previous_contents:
collection_name = collection.name
mother_db = collection.database
mother_db.drop_collection(collection_name)
mother_db.create_collection(collection_name)
kwargs = dict(kwargs, **{'safe': True}) # default is safe=True
collection.insert(dict_list_of_rows(df), **kwargs)
def to_df(cursor, roll_out_col=None, rm_id=True):
# if isinstance(cursor, dict):
if not isinstance(cursor, list):
df = pd.DataFrame(list(cursor))
else:
df = pd.DataFrame(cursor)
if rm_id:
df = rm_cols_if_present(df, ['_id'])
if roll_out_col:
# rollout the col
df = rollout_cols(df, roll_out_col)
df = pd.concat([rm_cols_if_present(df, roll_out_col), pd.DataFrame(list(df[roll_out_col]))], axis=1)
return df
def mongorestore(mongodump_file, db, collection, extra_options='', print_the_command=False):
db, collection = _get_db_and_collection_from_filename(mongodump_file, db=db, collection=collection)
command = 'mongorestore --db {db} --collection {collection} {extra_options} {mongodump_file}'.format(
db=db, collection=collection, extra_options=extra_options, mongodump_file=mongodump_file
)
if print_the_command:
print(command)
p = subprocess.Popen(command, shell=True)
p.wait() # wait till the process finishes
def backup_to_s3(db, collection, extra_options='', bucket_name=s3_backup_bucket_name, folder=None):
zip_filename = 'mongo_{db}_{collection}___{date}.bson.gz'.format(db=db, collection=collection,
date=datetime.now().strftime('%Y-%m-%d-%H%M'))
extra_options = extra_options + ' --out -'
command = 'mongodump --db {db} --collection {collection} {extra_options} | gzip > {zip_filename}'.format(
db=db, collection=collection, extra_options=extra_options, zip_filename=zip_filename
)
print(command)
p = subprocess.Popen(command, shell=True)
p.wait()
print("uploading file to s3://{bucket_name}{folder}/{zip_filename}".format(
bucket_name=bucket_name, folder=('/' + folder) if folder else '', zip_filename=zip_filename
))
s3 = S3(bucket_name=bucket_name)
s3.dumpf(zip_filename, zip_filename, folder=folder)
print("removing {zip_filename}".format(zip_filename=zip_filename))
os.remove(zip_filename)
def restore_from_s3_dump(s3_zip_filename, db=None, collection=None, extra_options='',
bucket_name=s3_backup_bucket_name, folder=None, print_the_command=True):
db, collection = _get_db_and_collection_from_filename(s3_zip_filename, db=db, collection=collection)
print("copy s3://{bucket_name}{folder}/{zip_filename} to local {zip_filename}".format(
bucket_name=bucket_name, folder=('/' + folder) if folder else '', zip_filename=s3_zip_filename
))
s3 = S3(bucket_name=bucket_name)
s3.loadf(key_name=s3_zip_filename, local_file_name=s3_zip_filename, folder=folder, bucket_name=bucket_name)
print("unzip {zip_filename}".format(zip_filename=s3_zip_filename))
unzipped_filename = s3_zip_filename.replace('.gz', '')
ungzip(gzip_file=s3_zip_filename, destination_file=unzipped_filename)
print("removing {gzip_file}".format(gzip_file=s3_zip_filename))
os.remove(s3_zip_filename)
mongorestore(mongodump_file=unzipped_filename, db=db, collection=collection,
extra_options=extra_options, print_the_command=print_the_command)
print("removing {unzipped_filename}".format(unzipped_filename=unzipped_filename))
os.remove(unzipped_filename)
def _get_db_and_collection_from_filename(filename, db=None, collection=None):
if db is None:
if collection:
db_coll_re = re.compile('mongo_(.*?)_{collection}___'.format(collection=collection))
return db_coll_re.findall(filename)[0], collection
else:
db_coll_re = re.compile('mongo_([^_]+)_(.*?)___')
return db_coll_re.findall(filename)[0]
else:
if collection:
return db, collection
else:
db_coll_re = re.compile('mongo_{db}_(.*?)___'.format(db=db))
return db, db_coll_re.findall(filename)[0]
# def _integrate_filt(filt, *args, **kwargs):
#
# if len(args) > 0:
# if 'spec' in kwargs:
# raise TypeError("got multiple values for keyword argument 'spec' (one in args, one in kwargs")
# args = list(args)
# kwargs['spec'] = args.pop(0)
# args = tuple(args)
#
# if 'spec' in kwargs:
# recursively_update_with(kwargs['spec'], filt)
# else:
# kwargs['spec'] = filt
#
# return args, kwargs
#
#
class FilteredCollection(Collection):
def __init__(self, mgc, filt=None):
self.mgc = mgc
if filt is None:
filt = {}
self.filt = filt.copy()
def _integrate_filt(self, *args, **kwargs):
if len(args) > 0:
if 'spec' in kwargs:
raise TypeError("got multiple values for keyword argument 'spec' (one in args, one in kwargs")
args = list(args)
kwargs['spec'] = args.pop(0)
args = tuple(args)
if 'spec' in kwargs:
recursively_update_with(kwargs['spec'], self.filt)
else:
kwargs['spec'] = self.filt
return args, kwargs
def find(self, *args, **kwargs):
"""
Filtered version of pymongo collection find.
"""
args, kwargs = self._integrate_filt(*args, **kwargs)
return self.mgc.find(*args, **kwargs)
def find_one(self, *args, **kwargs):
"""
Filtered version of pymongo collection find_one.
"""
args, kwargs = self._integrate_filt(*args, **kwargs)
return self.mgc.find_one(*args, **kwargs)
def count(self):
"""
Filtered version of pymongo collection count.
"""
return self.mgc.find(self.filt).count()
def __getattr__(self, item):
"""
Forward all other things to self.mgc
"""
        return getattr(self.mgc, item)
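# A minimal sketch of FilteredCollection, assuming a pymongo collection mgc; every find,
# find_one, and count issued through the wrapper is implicitly restricted by the filter.
def _example_filtered_collection(mgc):
    active = FilteredCollection(mgc, filt={'status': 'active'})
    n_active = active.count()                   # same as mgc.find({'status': 'active'}).count()
    first = active.find_one({'user': 'alice'})  # the filter is merged into the given spec
    return n_active, first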
#
# # def __getattribute__(self, name):
# # attr = super(FilteredCollection, self).__getattribute__(name)
# # if hasattr(attr, '__call__'):
# #
# # def newfunc(*args, **kwargs):
# # args, kwargs = self._integrate_filt(*args, **kwargs)
# # result = attr(*args, **kwargs)
# # return result
# #
# # return newfunc
# # else:
# # return attr
#
#
# def filtered_mgc(self, filt):
# filt = filt.copy()
#
# def find(self, *args, **kwargs):
# args, kwargs = _integrate_filt(filt, *args, **kwargs)
# return object.__getattribute__(self, 'find')(*args, **kwargs)
#
# def __getattribute__(self, name):
# if name == 'find':
# def newfunc(*args, **kwargs):
# args, kwargs = _integrate_filt(filt, *args, **kwargs)
# result = object.__getattribute__(self, name)(*args, **kwargs)
# return result
# return newfunc
# elif name == 'find_one':
# def newfunc(*args, **kwargs):
# args, kwargs = _integrate_filt(filt, *args, **kwargs)
# result = object.__getattribute__(self, name)(*args, **kwargs)
# return result
# return newfunc
# elif name == 'count':
# def newfunc(*args, **kwargs):
# args, kwargs = _integrate_filt(filt, *args, **kwargs)
# result = object.__getattribute__(self, name)(*args, **kwargs)
# return result
#
# return newfunc
# else:
# return object.__getattribute__(self, name)
# #
# # attr = self.__getattr__(name)
# # if hasattr(attr, '__call__'):
# # def newfunc(*args, **kwargs):
# # args, kwargs = _integrate_filt(filt, *args, **kwargs)
# # result = object.__getattribute__(self, name)(*args, **kwargs)
# # return result
# #
# # return newfunc
# # else:
# # return attr
#
# return inject_method(self, __getattribute__, '__getattribute__')
|
|
import copy
import ntpath
from collections import namedtuple
from ..api import APIClient
from ..constants import DEFAULT_DATA_CHUNK_SIZE
from ..errors import (
ContainerError, DockerException, ImageNotFound,
NotFound, create_unexpected_kwargs_error
)
from ..types import HostConfig
from ..utils import version_gte
from .images import Image
from .resource import Collection, Model
class Container(Model):
""" Local representation of a container object. Detailed configuration may
be accessed through the :py:attr:`attrs` attribute. Note that local
attributes are cached; users may call :py:meth:`reload` to
query the Docker daemon for the current properties, causing
:py:attr:`attrs` to be refreshed.
"""
@property
def name(self):
"""
The name of the container.
"""
if self.attrs.get('Name') is not None:
return self.attrs['Name'].lstrip('/')
@property
def image(self):
"""
The image of the container.
"""
image_id = self.attrs.get('ImageID', self.attrs['Image'])
if image_id is None:
return None
return self.client.images.get(image_id.split(':')[1])
@property
def labels(self):
"""
The labels of a container as dictionary.
"""
try:
result = self.attrs['Config'].get('Labels')
return result or {}
except KeyError:
raise DockerException(
'Label data is not available for sparse objects. Call reload()'
' to retrieve all information'
)
@property
def status(self):
"""
The status of the container. For example, ``running``, or ``exited``.
"""
if isinstance(self.attrs['State'], dict):
return self.attrs['State']['Status']
return self.attrs['State']
def attach(self, **kwargs):
"""
Attach to this container.
:py:meth:`logs` is a wrapper around this method, which you can
use instead if you want to fetch/stream container output without first
retrieving the entire backlog.
Args:
stdout (bool): Include stdout.
stderr (bool): Include stderr.
stream (bool): Return container output progressively as an iterator
of strings, rather than a single string.
logs (bool): Include the container's previous output.
Returns:
By default, the container's output as a single string.
If ``stream=True``, an iterator of output strings.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.attach(self.id, **kwargs)
def attach_socket(self, **kwargs):
"""
Like :py:meth:`attach`, but returns the underlying socket-like object
for the HTTP request.
Args:
params (dict): Dictionary of request parameters (e.g. ``stdout``,
``stderr``, ``stream``).
ws (bool): Use websockets instead of raw HTTP.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.attach_socket(self.id, **kwargs)
def commit(self, repository=None, tag=None, **kwargs):
"""
Commit a container to an image. Similar to the ``docker commit``
command.
Args:
repository (str): The repository to push the image to
tag (str): The tag to push
message (str): A commit message
author (str): The name of the author
changes (str): Dockerfile instructions to apply while committing
conf (dict): The configuration for the container. See the
`Engine API documentation
<https://docs.docker.com/reference/api/docker_remote_api/>`_
for full details.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.commit(self.id, repository=repository, tag=tag,
**kwargs)
return self.client.images.get(resp['Id'])
def diff(self):
"""
Inspect changes on a container's filesystem.
Returns:
(str)
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.diff(self.id)
def exec_run(self, cmd, stdout=True, stderr=True, stdin=False, tty=False,
privileged=False, user='', detach=False, stream=False,
socket=False, environment=None, workdir=None, demux=False):
"""
Run a command inside this container. Similar to
``docker exec``.
Args:
cmd (str or list): Command to be executed
stdout (bool): Attach to stdout. Default: ``True``
stderr (bool): Attach to stderr. Default: ``True``
stdin (bool): Attach to stdin. Default: ``False``
tty (bool): Allocate a pseudo-TTY. Default: False
privileged (bool): Run as privileged.
user (str): User to execute command as. Default: root
detach (bool): If true, detach from the exec command.
Default: False
stream (bool): Stream response data. Default: False
socket (bool): Return the connection socket to allow custom
read/write operations. Default: False
environment (dict or list): A dictionary or a list of strings in
the following format ``["PASSWORD=xxx"]`` or
``{"PASSWORD": "xxx"}``.
workdir (str): Path to working directory for this exec session
demux (bool): Return stdout and stderr separately
Returns:
(ExecResult): A tuple of (exit_code, output)
exit_code: (int):
Exit code for the executed command or ``None`` if
                    either ``stream`` or ``socket`` is ``True``.
output: (generator or bytes):
If ``stream=True``, a generator yielding response chunks.
If ``socket=True``, a socket object for the connection.
A bytestring containing response data otherwise.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.exec_create(
self.id, cmd, stdout=stdout, stderr=stderr, stdin=stdin, tty=tty,
privileged=privileged, user=user, environment=environment,
workdir=workdir,
)
exec_output = self.client.api.exec_start(
resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket,
demux=demux
)
if socket or stream:
return ExecResult(None, exec_output)
return ExecResult(
self.client.api.exec_inspect(resp['Id'])['ExitCode'],
exec_output
)
def export(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
"""
Export the contents of the container's filesystem as a tar archive.
Args:
chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB
Returns:
(str): The filesystem tar archive
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.export(self.id, chunk_size)
def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
"""
Retrieve a file or folder from the container in the form of a tar
archive.
Args:
path (str): Path to the file or folder to retrieve
chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB
Returns:
(tuple): First element is a raw tar data stream. Second element is
a dict containing ``stat`` information on the specified ``path``.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> f = open('./sh_bin.tar', 'wb')
>>> bits, stat = container.get_archive('/bin/sh')
>>> print(stat)
{'name': 'sh', 'size': 1075464, 'mode': 493,
'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''}
>>> for chunk in bits:
... f.write(chunk)
>>> f.close()
"""
return self.client.api.get_archive(self.id, path, chunk_size)
def kill(self, signal=None):
"""
Kill or send a signal to the container.
Args:
signal (str or int): The signal to send. Defaults to ``SIGKILL``
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.kill(self.id, signal=signal)
def logs(self, **kwargs):
"""
Get logs from this container. Similar to the ``docker logs`` command.
The ``stream`` parameter makes the ``logs`` function return a blocking
generator you can iterate over to retrieve log output as it happens.
Args:
stdout (bool): Get ``STDOUT``. Default ``True``
stderr (bool): Get ``STDERR``. Default ``True``
stream (bool): Stream the response. Default ``False``
timestamps (bool): Show timestamps. Default ``False``
tail (str or int): Output specified number of lines at the end of
logs. Either an integer of number of lines or the string
``all``. Default ``all``
since (datetime or int): Show logs since a given datetime or
integer epoch (in seconds)
follow (bool): Follow log output. Default ``False``
until (datetime or int): Show logs that occurred before the given
datetime or integer epoch (in seconds)
Returns:
(generator or str): Logs from the container.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.logs(self.id, **kwargs)
def pause(self):
"""
Pauses all processes within this container.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.pause(self.id)
def put_archive(self, path, data):
"""
Insert a file or folder in this container using a tar archive as
source.
Args:
path (str): Path inside the container where the file(s) will be
extracted. Must exist.
data (bytes): tar data to be extracted
Returns:
(bool): True if the call succeeds.
Raises:
:py:class:`~docker.errors.APIError` If an error occurs.
"""
return self.client.api.put_archive(self.id, path, data)
def remove(self, **kwargs):
"""
Remove this container. Similar to the ``docker rm`` command.
Args:
v (bool): Remove the volumes associated with the container
link (bool): Remove the specified link and not the underlying
container
force (bool): Force the removal of a running container (uses
``SIGKILL``)
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.remove_container(self.id, **kwargs)
def rename(self, name):
"""
Rename this container. Similar to the ``docker rename`` command.
Args:
name (str): New name for the container
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.rename(self.id, name)
def resize(self, height, width):
"""
Resize the tty session.
Args:
height (int): Height of tty session
width (int): Width of tty session
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.resize(self.id, height, width)
def restart(self, **kwargs):
"""
Restart this container. Similar to the ``docker restart`` command.
Args:
timeout (int): Number of seconds to try to stop for before killing
the container. Once killed it will then be restarted. Default
is 10 seconds.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.restart(self.id, **kwargs)
def start(self, **kwargs):
"""
Start this container. Similar to the ``docker start`` command, but
doesn't support attach options.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.start(self.id, **kwargs)
def stats(self, **kwargs):
"""
Stream statistics for this container. Similar to the
``docker stats`` command.
Args:
decode (bool): If set to true, stream will be decoded into dicts
on the fly. Only applicable if ``stream`` is True.
False by default.
stream (bool): If set to false, only the current stats will be
returned instead of a stream. True by default.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.stats(self.id, **kwargs)
def stop(self, **kwargs):
"""
Stops a container. Similar to the ``docker stop`` command.
Args:
timeout (int): Timeout in seconds to wait for the container to
stop before sending a ``SIGKILL``. Default: 10
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.stop(self.id, **kwargs)
def top(self, **kwargs):
"""
Display the running processes of the container.
Args:
            ps_args (str): Optional arguments to pass to ps (e.g. ``aux``)
Returns:
(str): The output of the top
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.top(self.id, **kwargs)
def unpause(self):
"""
Unpause all processes within the container.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.unpause(self.id)
def update(self, **kwargs):
"""
Update resource configuration of the containers.
Args:
blkio_weight (int): Block IO (relative weight), between 10 and 1000
cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
cpu_shares (int): CPU shares (relative weight)
cpuset_cpus (str): CPUs in which to allow execution
cpuset_mems (str): MEMs in which to allow execution
mem_limit (int or str): Memory limit
mem_reservation (int or str): Memory soft limit
memswap_limit (int or str): Total memory (memory + swap), -1 to
disable swap
kernel_memory (int or str): Kernel memory limit
restart_policy (dict): Restart policy dictionary
Returns:
(dict): Dictionary containing a ``Warnings`` key.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.update_container(self.id, **kwargs)
def wait(self, **kwargs):
"""
Block until the container stops, then return its exit code. Similar to
the ``docker wait`` command.
Args:
timeout (int): Request timeout
condition (str): Wait until a container state reaches the given
condition, either ``not-running`` (default), ``next-exit``,
or ``removed``
Returns:
(dict): The API's response as a Python dictionary, including
the container's exit code under the ``StatusCode`` attribute.
Raises:
:py:class:`requests.exceptions.ReadTimeout`
If the timeout is exceeded.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.wait(self.id, **kwargs)
class ContainerCollection(Collection):
model = Container
def run(self, image, command=None, stdout=True, stderr=False,
remove=False, **kwargs):
"""
Run a container. By default, it will wait for the container to finish
and return its logs, similar to ``docker run``.
If the ``detach`` argument is ``True``, it will start the container
and immediately return a :py:class:`Container` object, similar to
``docker run -d``.
Example:
Run a container and get its output:
>>> import docker
>>> client = docker.from_env()
>>> client.containers.run('alpine', 'echo hello world')
b'hello world\\n'
Run a container and detach:
>>> container = client.containers.run('bfirsh/reticulate-splines',
detach=True)
>>> container.logs()
'Reticulating spline 1...\\nReticulating spline 2...\\n'
Args:
image (str): The image to run.
command (str or list): The command to run in the container.
auto_remove (bool): enable auto-removal of the container on daemon
side when the container's process exits.
blkio_weight_device: Block IO weight (relative device weight) in
the form of: ``[{"Path": "device_path", "Weight": weight}]``.
blkio_weight: Block IO weight (relative weight), accepts a weight
value between 10 and 1000.
cap_add (list of str): Add kernel capabilities. For example,
``["SYS_ADMIN", "MKNOD"]``.
cap_drop (list of str): Drop kernel capabilities.
cpu_count (int): Number of usable CPUs (Windows only).
cpu_percent (int): Usable percentage of the available CPUs
(Windows only).
cpu_period (int): The length of a CPU period in microseconds.
cpu_quota (int): Microseconds of CPU time that the container can
get in a CPU period.
cpu_shares (int): CPU shares (relative weight).
cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
``0,1``).
cpuset_mems (str): Memory nodes (MEMs) in which to allow execution
(``0-3``, ``0,1``). Only effective on NUMA systems.
detach (bool): Run container in the background and return a
:py:class:`Container` object.
device_cgroup_rules (:py:class:`list`): A list of cgroup rules to
apply to the container.
device_read_bps: Limit read rate (bytes per second) from a device
in the form of: `[{"Path": "device_path", "Rate": rate}]`
device_read_iops: Limit read rate (IO per second) from a device.
device_write_bps: Limit write rate (bytes per second) from a
device.
device_write_iops: Limit write rate (IO per second) from a device.
devices (:py:class:`list`): Expose host devices to the container,
as a list of strings in the form
``<path_on_host>:<path_in_container>:<cgroup_permissions>``.
For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
to have read-write access to the host's ``/dev/sda`` via a
node named ``/dev/xvda`` inside the container.
dns (:py:class:`list`): Set custom DNS servers.
dns_opt (:py:class:`list`): Additional options to be added to the
container's ``resolv.conf`` file.
dns_search (:py:class:`list`): DNS search domains.
domainname (str or list): Set custom DNS search domains.
entrypoint (str or list): The entrypoint for the container.
environment (dict or list): Environment variables to set inside
the container, as a dictionary or a list of strings in the
format ``["SOMEVARIABLE=xxx"]``.
extra_hosts (dict): Additional hostnames to resolve inside the
container, as a mapping of hostname to IP address.
group_add (:py:class:`list`): List of additional group names and/or
IDs that the container process will run as.
healthcheck (dict): Specify a test to perform to check that the
container is healthy.
hostname (str): Optional hostname for the container.
init (bool): Run an init inside the container that forwards
signals and reaps processes
init_path (str): Path to the docker-init binary
ipc_mode (str): Set the IPC mode for the container.
isolation (str): Isolation technology to use. Default: `None`.
labels (dict or list): A dictionary of name-value labels (e.g.
``{"label1": "value1", "label2": "value2"}``) or a list of
names of labels to set with empty values (e.g.
``["label1", "label2"]``)
links (dict): Mapping of links using the
``{'container': 'alias'}`` format. The alias is optional.
Containers declared in this dict will be linked to the new
container using the provided alias. Default: ``None``.
log_config (LogConfig): Logging configuration.
mac_address (str): MAC address to assign to the container.
mem_limit (int or str): Memory limit. Accepts float values
(which represent the memory limit of the created container in
bytes) or a string with a units identification char
(``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
specified without a units character, bytes are assumed as an
intended unit.
mem_swappiness (int): Tune a container's memory swappiness
behavior. Accepts number between 0 and 100.
memswap_limit (str or int): Maximum amount of memory + swap a
container is allowed to consume.
mounts (:py:class:`list`): Specification for mounts to be added to
the container. More powerful alternative to ``volumes``. Each
item in the list is expected to be a
:py:class:`docker.types.Mount` object.
name (str): The name for this container.
nano_cpus (int): CPU quota in units of 1e-9 CPUs.
network (str): Name of the network this container will be connected
to at creation time. You can connect to additional networks
using :py:meth:`Network.connect`. Incompatible with
``network_mode``.
network_disabled (bool): Disable networking.
network_mode (str): One of:
- ``bridge`` Create a new network stack for the container on
                  the bridge network.
- ``none`` No networking for this container.
- ``container:<name|id>`` Reuse another container's network
stack.
- ``host`` Use the host network stack.
Incompatible with ``network``.
oom_kill_disable (bool): Whether to disable OOM killer.
oom_score_adj (int): An integer value containing the score given
to the container in order to tune OOM killer preferences.
pid_mode (str): If set to ``host``, use the host PID namespace
inside the container.
pids_limit (int): Tune a container's pids limit. Set ``-1`` for
unlimited.
platform (str): Platform in the format ``os[/arch[/variant]]``.
Only used if the method needs to pull the requested image.
ports (dict): Ports to bind inside the container.
The keys of the dictionary are the ports to bind inside the
container, either as an integer or a string in the form
``port/protocol``, where the protocol is either ``tcp`` or
``udp``.
The values of the dictionary are the corresponding ports to
open on the host, which can be either:
- The port number, as an integer. For example,
``{'2222/tcp': 3333}`` will expose port 2222 inside the
container as port 3333 on the host.
- ``None``, to assign a random host port. For example,
``{'2222/tcp': None}``.
- A tuple of ``(address, port)`` if you want to specify the
host interface. For example,
``{'1111/tcp': ('127.0.0.1', 1111)}``.
- A list of integers, if you want to bind multiple host ports
to a single container port. For example,
``{'1111/tcp': [1234, 4567]}``.
privileged (bool): Give extended privileges to this container.
publish_all_ports (bool): Publish all ports to the host.
read_only (bool): Mount the container's root filesystem as read
only.
remove (bool): Remove the container when it has finished running.
Default: ``False``.
restart_policy (dict): Restart the container when it exits.
Configured as a dictionary with keys:
- ``Name`` One of ``on-failure``, or ``always``.
- ``MaximumRetryCount`` Number of times to restart the
container on failure.
For example:
``{"Name": "on-failure", "MaximumRetryCount": 5}``
runtime (str): Runtime to use with this container.
security_opt (:py:class:`list`): A list of string values to
customize labels for MLS systems, such as SELinux.
shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
stdin_open (bool): Keep ``STDIN`` open even if not attached.
stdout (bool): Return logs from ``STDOUT`` when ``detach=False``.
Default: ``True``.
stderr (bool): Return logs from ``STDERR`` when ``detach=False``.
Default: ``False``.
stop_signal (str): The stop signal to use to stop the container
(e.g. ``SIGINT``).
storage_opt (dict): Storage driver options per container as a
key-value mapping.
stream (bool): If true and ``detach`` is false, return a log
generator instead of a string. Ignored if ``detach`` is true.
Default: ``False``.
sysctls (dict): Kernel parameters to set in the container.
tmpfs (dict): Temporary filesystems to mount, as a dictionary
mapping a path inside the container to options for that path.
For example:
.. code-block:: python
{
'/mnt/vol2': '',
'/mnt/vol1': 'size=3G,uid=1000'
}
tty (bool): Allocate a pseudo-TTY.
ulimits (:py:class:`list`): Ulimits to set inside the container,
as a list of :py:class:`docker.types.Ulimit` instances.
use_config_proxy (bool): If ``True``, and if the docker client
configuration file (``~/.docker/config.json`` by default)
contains a proxy configuration, the corresponding environment
variables will be set in the container being built.
user (str or int): Username or UID to run commands as inside the
container.
userns_mode (str): Sets the user namespace mode for the container
when user namespace remapping option is enabled. Supported
values are: ``host``
volume_driver (str): The name of a volume driver/plugin.
volumes (dict or list): A dictionary to configure volumes mounted
inside the container. The key is either the host path or a
volume name, and the value is a dictionary with the keys:
- ``bind`` The path to mount the volume inside the container
- ``mode`` Either ``rw`` to mount the volume read/write, or
``ro`` to mount it read-only.
For example:
.. code-block:: python
{'/home/user1/': {'bind': '/mnt/vol2', 'mode': 'rw'},
'/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}}
volumes_from (:py:class:`list`): List of container names or IDs to
get volumes from.
working_dir (str): Path to the working directory.
Returns:
The container logs, either ``STDOUT``, ``STDERR``, or both,
depending on the value of the ``stdout`` and ``stderr`` arguments.
``STDOUT`` and ``STDERR`` may be read only if either ``json-file``
            or ``journald`` logging driver is used. Thus, if you are using none of
these drivers, a ``None`` object is returned instead. See the
`Engine API documentation
<https://docs.docker.com/engine/api/v1.30/#operation/ContainerLogs/>`_
for full details.
If ``detach`` is ``True``, a :py:class:`Container` object is
returned instead.
Raises:
:py:class:`docker.errors.ContainerError`
If the container exits with a non-zero exit code and
``detach`` is ``False``.
:py:class:`docker.errors.ImageNotFound`
If the specified image does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if isinstance(image, Image):
image = image.id
stream = kwargs.pop('stream', False)
detach = kwargs.pop('detach', False)
platform = kwargs.pop('platform', None)
if detach and remove:
if version_gte(self.client.api._version, '1.25'):
kwargs["auto_remove"] = True
else:
raise RuntimeError("The options 'detach' and 'remove' cannot "
"be used together in api versions < 1.25.")
if kwargs.get('network') and kwargs.get('network_mode'):
raise RuntimeError(
'The options "network" and "network_mode" can not be used '
'together.'
)
try:
container = self.create(image=image, command=command,
detach=detach, **kwargs)
except ImageNotFound:
self.client.images.pull(image, platform=platform)
container = self.create(image=image, command=command,
detach=detach, **kwargs)
container.start()
if detach:
return container
logging_driver = container.attrs['HostConfig']['LogConfig']['Type']
out = None
if logging_driver == 'json-file' or logging_driver == 'journald':
out = container.logs(
stdout=stdout, stderr=stderr, stream=True, follow=True
)
exit_status = container.wait()['StatusCode']
if exit_status != 0:
out = None
if not kwargs.get('auto_remove'):
out = container.logs(stdout=False, stderr=True)
if remove:
container.remove()
if exit_status != 0:
raise ContainerError(
container, exit_status, command, image, out
)
return out if stream or out is None else b''.join(
[line for line in out]
)
def create(self, image, command=None, **kwargs):
"""
Create a container without starting it. Similar to ``docker create``.
Takes the same arguments as :py:meth:`run`, except for ``stdout``,
``stderr``, and ``remove``.
Returns:
A :py:class:`Container` object.
Raises:
:py:class:`docker.errors.ImageNotFound`
If the specified image does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if isinstance(image, Image):
image = image.id
kwargs['image'] = image
kwargs['command'] = command
kwargs['version'] = self.client.api._version
create_kwargs = _create_container_args(kwargs)
resp = self.client.api.create_container(**create_kwargs)
return self.get(resp['Id'])
def get(self, container_id):
"""
Get a container by name or ID.
Args:
container_id (str): Container name or ID.
Returns:
A :py:class:`Container` object.
Raises:
:py:class:`docker.errors.NotFound`
If the container does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.inspect_container(container_id)
return self.prepare_model(resp)
def list(self, all=False, before=None, filters=None, limit=-1, since=None,
sparse=False, ignore_removed=False):
"""
List containers. Similar to the ``docker ps`` command.
Args:
all (bool): Show all containers. Only running containers are shown
by default
since (str): Show only containers created since Id or Name, include
non-running ones
before (str): Show only container created before Id or Name,
include non-running ones
limit (int): Show `limit` last created containers, include
non-running ones
filters (dict): Filters to be processed on the image list.
Available filters:
- `exited` (int): Only containers with specified exit code
- `status` (str): One of ``restarting``, ``running``,
``paused``, ``exited``
- `label` (str): format either ``"key"`` or ``"key=value"``
- `id` (str): The id of the container.
- `name` (str): The name of the container.
- `ancestor` (str): Filter by container ancestor. Format of
``<image-name>[:tag]``, ``<image-id>``, or
``<image@digest>``.
- `before` (str): Only containers created before a particular
container. Give the container name or id.
- `since` (str): Only containers created after a particular
container. Give container name or id.
A comprehensive list can be found in the documentation for
`docker ps
<https://docs.docker.com/engine/reference/commandline/ps>`_.
sparse (bool): Do not inspect containers. Returns partial
information, but guaranteed not to block. Use
:py:meth:`Container.reload` on resulting objects to retrieve
all attributes. Default: ``False``
ignore_removed (bool): Ignore failures due to missing containers
when attempting to inspect containers from the original list.
Set to ``True`` if race conditions are likely. Has no effect
if ``sparse=True``. Default: ``False``
Returns:
(list of :py:class:`Container`)
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.containers(all=all, before=before,
filters=filters, limit=limit,
since=since)
if sparse:
return [self.prepare_model(r) for r in resp]
else:
containers = []
for r in resp:
try:
containers.append(self.get(r['Id']))
# a container may have been removed while iterating
except NotFound:
if not ignore_removed:
raise
return containers
def prune(self, filters=None):
return self.client.api.prune_containers(filters=filters)
prune.__doc__ = APIClient.prune_containers.__doc__
# kwargs to copy straight from run to create
RUN_CREATE_KWARGS = [
'command',
'detach',
'domainname',
'entrypoint',
'environment',
'healthcheck',
'hostname',
'image',
'labels',
'mac_address',
'name',
'network_disabled',
'stdin_open',
'stop_signal',
'tty',
'use_config_proxy',
'user',
'volume_driver',
'working_dir',
]
# kwargs to copy straight from run to host_config
RUN_HOST_CONFIG_KWARGS = [
'auto_remove',
'blkio_weight_device',
'blkio_weight',
'cap_add',
'cap_drop',
'cgroup_parent',
'cpu_count',
'cpu_percent',
'cpu_period',
'cpu_quota',
'cpu_shares',
'cpuset_cpus',
'cpuset_mems',
'cpu_rt_period',
'cpu_rt_runtime',
'device_cgroup_rules',
'device_read_bps',
'device_read_iops',
'device_write_bps',
'device_write_iops',
'devices',
'dns_opt',
'dns_search',
'dns',
'extra_hosts',
'group_add',
'init',
'init_path',
'ipc_mode',
'isolation',
'kernel_memory',
'links',
'log_config',
'lxc_conf',
'mem_limit',
'mem_reservation',
'mem_swappiness',
'memswap_limit',
'mounts',
'nano_cpus',
'network_mode',
'oom_kill_disable',
'oom_score_adj',
'pid_mode',
'pids_limit',
'privileged',
'publish_all_ports',
'read_only',
'restart_policy',
'security_opt',
'shm_size',
'storage_opt',
'sysctls',
'tmpfs',
'ulimits',
'userns_mode',
'uts_mode',
'version',
'volumes_from',
'runtime'
]
def _create_container_args(kwargs):
"""
Convert arguments to create() to arguments to create_container().
"""
# Copy over kwargs which can be copied directly
create_kwargs = {}
for key in copy.copy(kwargs):
if key in RUN_CREATE_KWARGS:
create_kwargs[key] = kwargs.pop(key)
host_config_kwargs = {}
for key in copy.copy(kwargs):
if key in RUN_HOST_CONFIG_KWARGS:
host_config_kwargs[key] = kwargs.pop(key)
# Process kwargs which are split over both create and host_config
ports = kwargs.pop('ports', {})
if ports:
host_config_kwargs['port_bindings'] = ports
volumes = kwargs.pop('volumes', {})
if volumes:
host_config_kwargs['binds'] = volumes
network = kwargs.pop('network', None)
if network:
create_kwargs['networking_config'] = {network: None}
host_config_kwargs['network_mode'] = network
# All kwargs should have been consumed by this point, so raise
# error if any are left
if kwargs:
raise create_unexpected_kwargs_error('run', kwargs)
create_kwargs['host_config'] = HostConfig(**host_config_kwargs)
# Fill in any kwargs which need processing by create_host_config first
port_bindings = create_kwargs['host_config'].get('PortBindings')
if port_bindings:
# sort to make consistent for tests
create_kwargs['ports'] = [tuple(p.split('/', 1))
for p in sorted(port_bindings.keys())]
if volumes:
if isinstance(volumes, dict):
create_kwargs['volumes'] = [
v.get('bind') for v in volumes.values()
]
else:
create_kwargs['volumes'] = [
_host_volume_from_bind(v) for v in volumes
]
return create_kwargs
def _host_volume_from_bind(bind):
drive, rest = ntpath.splitdrive(bind)
bits = rest.split(':', 1)
    if len(bits) == 1 or bits[1] in ('ro', 'rw'):
        return drive + bits[0]
    elif bits[1].endswith(':ro') or bits[1].endswith(':rw'):
        # strip the trailing ':ro'/':rw' mode; rstrip(':ro') would strip characters, not the suffix
        return bits[1][:-3]
    else:
        return bits[1]
ExecResult = namedtuple('ExecResult', 'exit_code,output')
""" A result of Container.exec_run with the properties ``exit_code`` and
``output``. """
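# A brief usage sketch, assuming a reachable Docker daemon and an 'alpine' image; it shows
# the ExecResult tuple returned by Container.exec_run().
def _example_exec_run():
    import docker
    client = docker.from_env()
    container = client.containers.run('alpine', 'sleep 60', detach=True)
    try:
        exit_code, output = container.exec_run('echo hello')
        assert exit_code == 0 and output.strip() == b'hello'
    finally:
        container.remove(force=True)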
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from heat.common import lifecycle_plugin_utils
from heat.engine import lifecycle_plugin
from heat.engine import resources
from heat.tests import common
empty_template = '''
heat_template_version: '2013-05-23'
description: Empty stack
resources:
'''
class LifecyclePluginUtilsTest(common.HeatTestCase):
"""Basic tests for :module:'heat.common.lifecycle_plugin_utils'.
Basic tests for the helper methods in
:module:'heat.common.lifecycle_plugin_utils'.
"""
def tearDown(self):
super(LifecyclePluginUtilsTest, self).tearDown()
lifecycle_plugin_utils.pp_class_instances = None
def mock_lcp_class_map(self, lcp_mappings):
self.mock_get_plugins = self.patchobject(
resources.global_env(), 'get_stack_lifecycle_plugins',
return_value=lcp_mappings)
# reset cache
lifecycle_plugin_utils.pp_class_instances = None
def test_get_plug_point_class_instances(self):
"""Tests the get_plug_point_class_instances function."""
lcp_mappings = [('A::B::C1', TestLifecycleCallout1)]
self.mock_lcp_class_map(lcp_mappings)
pp_cinstances = lifecycle_plugin_utils.get_plug_point_class_instances()
self.assertIsNotNone(pp_cinstances)
self.assertTrue(self.is_iterable(pp_cinstances),
"not iterable: %s" % pp_cinstances)
self.assertEqual(1, len(pp_cinstances))
self.assertEqual(TestLifecycleCallout1, pp_cinstances[0].__class__)
self.mock_get_plugins.assert_called_once_with()
def test_do_pre_and_post_callouts(self):
lcp_mappings = [('A::B::C1', TestLifecycleCallout1)]
self.mock_lcp_class_map(lcp_mappings)
mc = mock.Mock()
mc.__setattr__("pre_counter_for_unit_test", 0)
mc.__setattr__("post_counter_for_unit_test", 0)
ms = mock.Mock()
ms.__setattr__("action", 'A')
lifecycle_plugin_utils.do_pre_ops(mc, ms, None, None)
self.assertEqual(1, mc.pre_counter_for_unit_test)
lifecycle_plugin_utils.do_post_ops(mc, ms, None, None)
self.assertEqual(1, mc.post_counter_for_unit_test)
self.mock_get_plugins.assert_called_once_with()
def test_class_instantiation_and_sorting(self):
lcp_mappings = []
self.mock_lcp_class_map(lcp_mappings)
pp_cis = lifecycle_plugin_utils.get_plug_point_class_instances()
self.assertEqual(0, len(pp_cis))
self.mock_get_plugins.assert_called_once_with()
# order should change with sort
lcp_mappings = [('A::B::C2', TestLifecycleCallout2),
('A::B::C1', TestLifecycleCallout1)]
self.mock_lcp_class_map(lcp_mappings)
pp_cis = lifecycle_plugin_utils.get_plug_point_class_instances()
self.assertEqual(2, len(pp_cis))
self.assertEqual(100, pp_cis[0].get_ordinal())
self.assertEqual(101, pp_cis[1].get_ordinal())
self.assertEqual(TestLifecycleCallout1, pp_cis[0].__class__)
self.assertEqual(TestLifecycleCallout2, pp_cis[1].__class__)
self.mock_get_plugins.assert_called_once_with()
# order should NOT change with sort
lcp_mappings = [('A::B::C1', TestLifecycleCallout1),
('A::B::C2', TestLifecycleCallout2)]
self.mock_lcp_class_map(lcp_mappings)
pp_cis = lifecycle_plugin_utils.get_plug_point_class_instances()
self.assertEqual(2, len(pp_cis))
self.assertEqual(100, pp_cis[0].get_ordinal())
self.assertEqual(101, pp_cis[1].get_ordinal())
self.assertEqual(TestLifecycleCallout1, pp_cis[0].__class__)
self.assertEqual(TestLifecycleCallout2, pp_cis[1].__class__)
self.mock_get_plugins.assert_called_once_with()
        # sort failure due to exception thrown by get_ordinal
lcp_mappings = [('A::B::C2', TestLifecycleCallout2),
('A::B::C3', TestLifecycleCallout3),
('A::B::C1', TestLifecycleCallout1)]
self.mock_lcp_class_map(lcp_mappings)
pp_cis = lifecycle_plugin_utils.get_plug_point_class_instances()
self.assertEqual(3, len(pp_cis))
self.assertEqual(100, pp_cis[2].get_ordinal())
self.assertEqual(101, pp_cis[0].get_ordinal())
# (can sort fail partially? If so then this test may break)
self.assertEqual(TestLifecycleCallout2, pp_cis[0].__class__)
self.assertEqual(TestLifecycleCallout3, pp_cis[1].__class__)
self.assertEqual(TestLifecycleCallout1, pp_cis[2].__class__)
self.mock_get_plugins.assert_called_once_with()
def test_do_pre_op_failure(self):
lcp_mappings = [('A::B::C5', TestLifecycleCallout1),
('A::B::C4', TestLifecycleCallout4)]
self.mock_lcp_class_map(lcp_mappings)
mc = mock.Mock()
mc.__setattr__("pre_counter_for_unit_test", 0)
mc.__setattr__("post_counter_for_unit_test", 0)
ms = mock.Mock()
ms.__setattr__("action", 'A')
failed = False
try:
lifecycle_plugin_utils.do_pre_ops(mc, ms, None, None)
except Exception:
failed = True
self.assertTrue(failed)
self.assertEqual(1, mc.pre_counter_for_unit_test)
self.assertEqual(1, mc.post_counter_for_unit_test)
self.mock_get_plugins.assert_called_once_with()
def test_do_post_op_failure(self):
lcp_mappings = [('A::B::C1', TestLifecycleCallout1),
('A::B::C5', TestLifecycleCallout5)]
self.mock_lcp_class_map(lcp_mappings)
mc = mock.Mock()
mc.__setattr__("pre_counter_for_unit_test", 0)
mc.__setattr__("post_counter_for_unit_test", 0)
ms = mock.Mock()
ms.__setattr__("action", 'A')
lifecycle_plugin_utils.do_post_ops(mc, ms, None, None)
self.assertEqual(1, mc.post_counter_for_unit_test)
self.mock_get_plugins.assert_called_once_with()
def test_exercise_base_lifecycle_plugin_class(self):
lcp = lifecycle_plugin.LifecyclePlugin()
ordinal = lcp.get_ordinal()
lcp.do_pre_op(None, None, None)
lcp.do_post_op(None, None, None)
self.assertEqual(100, ordinal)
def is_iterable(self, obj):
        # special case string
        if not obj:
            return False
        if isinstance(obj, str):
            return False
        # Test for iterability
try:
for m in obj:
break
except TypeError:
return False
return True
class TestLifecycleCallout1(lifecycle_plugin.LifecyclePlugin):
"""Sample test class for testing pre-op and post-op work on a stack."""
def do_pre_op(self, cnxt, stack, current_stack=None, action=None):
cnxt.pre_counter_for_unit_test += 1
def do_post_op(self, cnxt, stack, current_stack=None, action=None,
is_stack_failure=False):
cnxt.post_counter_for_unit_test += 1
def get_ordinal(self):
return 100
class TestLifecycleCallout2(lifecycle_plugin.LifecyclePlugin):
"""Sample test class for testing pre-op and post-op work on a stack.
Different ordinal and increment counters by 2.
"""
def do_pre_op(self, cnxt, stack, current_stack=None, action=None):
cnxt.pre_counter_for_unit_test += 2
def do_post_op(self, cnxt, stack, current_stack=None, action=None,
is_stack_failure=False):
cnxt.post_counter_for_unit_test += 2
def get_ordinal(self):
return 101
class TestLifecycleCallout3(lifecycle_plugin.LifecyclePlugin):
"""Sample test class for testing pre-op and post-op work on a stack.
Methods raise exceptions.
"""
def do_pre_op(self, cnxt, stack, current_stack=None, action=None):
raise Exception()
def do_post_op(self, cnxt, stack, current_stack=None, action=None,
is_stack_failure=False):
raise Exception()
def get_ordinal(self):
raise Exception()
class TestLifecycleCallout4(lifecycle_plugin.LifecyclePlugin):
"""Sample test class for testing pre-op and post-op work on a stack.
do_pre_op, do_post_op both throw exception.
"""
def do_pre_op(self, cnxt, stack, current_stack=None, action=None):
raise Exception()
def do_post_op(self, cnxt, stack, current_stack=None, action=None,
is_stack_failure=False):
raise Exception()
def get_ordinal(self):
return 103
class TestLifecycleCallout5(lifecycle_plugin.LifecyclePlugin):
"""Sample test class for testing pre-op and post-op work on a stack.
do_post_op throws exception.
"""
def do_pre_op(self, cnxt, stack, current_stack=None, action=None):
cnxt.pre_counter_for_unit_test += 1
def do_post_op(self, cnxt, stack, current_stack=None, action=None,
is_stack_failure=False):
raise Exception()
def get_ordinal(self):
return 100
|
|
# Copyright 2011,2012,2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Various utility functions
Some of these are POX-specific, and some aren't.
"""
#TODO: Break into multiple modules? (data structures, POX-specific, etc.)
from __future__ import print_function
import traceback
import struct
import sys
import os
import time
import socket
import collections
#FIXME: ugh, why can't I make importing pox.core work here?
import logging
log = logging.getLogger("util")
class DirtyList (list):
"""
A list which keeps track of changes
When the list is altered, callback (if any) is called, and dirty is set.
"""
#TODO: right now the callback may be called more often than needed
# and it may not be called with good names/parameters.
# All you can really rely on is that it will be called in
# some way if something may have changed.
def __init__ (self, *args, **kw):
list.__init__(self, *args, **kw)
self.dirty = False
self.callback = None
def __setslice__ (self, k, v):
#TODO: actually check for change
self._smudge('__setslice__', k, v)
list.__setslice__(self, k, v)
def __delslice__ (self, k):
#TODO: actually check for change
self._smudge('__delslice__', k, None)
list.__delslice__(self, k)
def append (self, v):
self._smudge('append', None, v)
list.append(self, v)
def extend (self, v):
self._smudge('extend', None, v)
list.extend(self, v)
def insert (self, i, v):
    self._smudge('insert', i, v)
    list.insert(self, i, v)
def pop (self, i=-1):
self._smudge('pop', i, None)
list.pop(self, i)
def remove (self, v):
if v in self:
self._smudge('remove', None, v)
list.remove(self, v)
def reverse (self):
if len(self):
self._smudge('reverse', None, None)
list.reverse(self)
def sort (self, *arg, **kw):
#TODO: check for changes?
self._smudge('sort', None, None)
list.sort(self, *arg, **kw)
def __setitem__ (self, k, v):
if isinstance(k, slice):
#TODO: actually check for change
self._smudge('__setitem__slice',k,v)
elif self[k] != v:
self._smudge('__setitem__',k,v)
list.__setitem__(self, k, v)
def __delitem__ (self, k):
list.__delitem__(self, k)
if isinstance(k, slice):
#TODO: actually check for change
      self._smudge('__delitem__slice', k, None)
else:
self._smudge('__delitem__', k, None)
def _smudge (self, reason, k, v):
if self.callback:
if self.callback(reason, k, v) is not True:
self.dirty = True
else:
self.dirty = True
class DirtyDict (dict):
"""
A dict that tracks whether values have been changed shallowly.
If you set a callback, it will be called when the value changes, and
passed three values: "add"/"modify"/"delete", key, value
"""
def __init__ (self, *args, **kw):
dict.__init__(self, *args, **kw)
self.dirty = False
self.callback = None
def _smudge (self, reason, k, v):
if self.callback:
if self.callback(reason, k, v) is not True:
self.dirty = True
else:
self.dirty = True
def __setitem__ (self, k, v):
if k not in self:
self._smudge('__setitem__add',k,v)
elif self[k] != v:
self._smudge('__setitem__modify',k,v)
dict.__setitem__(self, k, v)
def __delitem__ (self, k):
self._smudge('__delitem__', k, None)
dict.__delitem__(self, k)
class DefaultDict (collections.defaultdict):
"""
A dictionary that can create missing values
This is similar to (and a subclass of) collections.defaultdict. However, it
calls the default factory passing it the missing key.
"""
#TODO: Make key-passing a constructor option so that this can serve as a
# complete defaultdict replacement.
def __missing__ (self, key):
v = self.default_factory(key)
self[key] = v
return v
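# Illustrative use (not from the original module): unlike collections.defaultdict,
# the factory receives the missing key itself, e.g.
#   d = DefaultDict(lambda key: key * 2)
#   d[3]     -> 6     (and 6 is then stored under key 3)
#   d['ab']  -> 'abab'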
def set_extend (l, index, item, emptyValue = None):
"""
Sets l[index] = item, padding l if needed
Adds item to the list l at position index. If index is beyond the end
of the list, it will pad the list out until it's large enough, using
emptyValue for the new entries.
"""
#TODO: Better name? The 'set' is a bit misleading.
if index >= len(l):
    l += ([emptyValue] * (index - len(l) + 1))
l[index] = item
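# Quick sketch of the intended behaviour (illustrative values, not from the source):
#   l = [1, 2]
#   set_extend(l, 4, 'x')   # l becomes [1, 2, None, None, 'x']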
def str_to_dpid (s):
"""
Convert a DPID in the canonical string form into a long int.
"""
if s.lower().startswith("0x"):
s = s[2:]
s = s.replace("-", "").split("|", 2)
a = int(s[0], 16)
if a > 0xffFFffFFffFF:
b = a >> 48
a &= 0xffFFffFFffFF
else:
b = 0
if len(s) == 2:
b = int(s[1])
return a | (b << 48)
strToDPID = str_to_dpid
def dpid_to_str (dpid, alwaysLong = False):
"""
  Convert a DPID from a long int into the canonical string form.
"""
if type(dpid) is long or type(dpid) is int:
# Not sure if this is right
dpid = struct.pack('!Q', dpid)
assert len(dpid) == 8
r = '-'.join(['%02x' % (ord(x),) for x in dpid[2:]])
if alwaysLong or dpid[0:2] != (b'\x00'*2):
r += '|' + str(struct.unpack('!H', dpid[0:2])[0])
return r
dpidToStr = dpid_to_str # Deprecated
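# Worked examples of the canonical DPID form handled above (illustrative only):
#   str_to_dpid("00-00-00-00-00-01")    -> 1
#   str_to_dpid("00-00-00-00-00-01|2")  -> 1 | (2 << 48)
#   dpid_to_str(1)                      -> "00-00-00-00-00-01"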
def assert_type(name, obj, types, none_ok=True):
"""
Assert that a parameter is of a given type.
Raise an Assertion Error with a descriptive error msg if not.
name: name of the parameter for error messages
obj: parameter value to be checked
types: type or list or tuple of types that is acceptable
none_ok: whether 'None' is an ok value
"""
if obj is None:
if none_ok:
return True
else:
raise AssertionError("%s may not be None" % name)
if not isinstance(types, (tuple, list)):
types = [ types ]
for cls in types:
if isinstance(obj, cls):
return True
allowed_types = "|".join(map(lambda x: str(x), types))
stack = traceback.extract_stack()
stack_msg = "Function call %s() in %s:%d" % (stack[-2][2],
stack[-3][0], stack[-3][1])
type_msg = ("%s must be instance of %s (but is %s)"
% (name, allowed_types , str(type(obj))))
raise AssertionError(stack_msg + ": " + type_msg)
def init_helper (obj, kw):
"""
Helper for classes with attributes initialized by keyword arguments.
Inside a class's __init__, this will copy keyword arguments to fields
of the same name. See libopenflow for an example.
"""
for k,v in kw.iteritems():
if not hasattr(obj, k):
raise TypeError(obj.__class__.__name__ + " constructor got "
+ "unexpected keyword argument '" + k + "'")
setattr(obj, k, v)
initHelper = init_helper # Deprecated
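# Sketch of the intended pattern (hypothetical class, not taken from libopenflow):
#   class Example (object):
#     def __init__ (self, **kw):
#       self.foo = 1
#       self.bar = 2
#       init_helper(self, kw)
#   Example(foo=10).foo  -> 10
#   Example(baz=1)       -> raises TypeError (unexpected keyword 'baz')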
def make_pinger ():
"""
A pinger is basically a thing to let you wake a select().
On Unix systems, this makes a pipe pair. But on Windows, select() only
works with sockets, so it makes a pair of connected sockets.
"""
class PipePinger (object):
def __init__ (self, pair):
self._w = pair[1]
self._r = pair[0]
assert os is not None
def ping (self):
if os is None: return #TODO: Is there a better fix for this?
os.write(self._w, ' ')
def fileno (self):
return self._r
def pongAll (self):
#TODO: make this actually read all
os.read(self._r, 1024)
def pong (self):
os.read(self._r, 1)
def __del__ (self):
try:
os.close(self._w)
except:
pass
try:
os.close(self._r)
except:
pass
def __repr__ (self):
return "<%s %i/%i>" % (self.__class__.__name__, self._w, self._r)
class SocketPinger (object):
def __init__ (self, pair):
self._w = pair[1]
self._r = pair[0]
def ping (self):
self._w.send(' ')
#FIXME: Since the read socket is now nonblocking, there's the possibility
# that the recv() calls for pong will not complete. We should
# deal with this.
def pong (self):
self._r.recv(1)
def pongAll (self):
#TODO: make this actually read all
self._r.recv(1024)
def fileno (self):
return self._r.fileno()
def __repr__ (self):
return "<%s %s/%s>" % (self.__class__.__name__, self._w, self._r)
#return PipePinger((os.pipe()[0],os.pipe()[1])) # To test failure case
if os.name == "posix":
return PipePinger(os.pipe())
#TODO: clean up sockets?
#TODO: use socketpair if available?
localaddresses = ['127.0.0.1', '127.127.127.127'] # Try oddball one first
startPort = 10000
import socket
import select
def tryConnect ():
l = None
localaddress = None
port = None
while True:
if localaddress is None:
if not localaddresses:
raise RuntimeError("Could not find a free socket")
localaddress = localaddresses.pop()
port = startPort
try:
l = socket.socket()
l.bind( (localaddress, port) )
l.listen(0)
break
except:
port += 1
if port - startPort > 1000:
localaddress = None
l.setblocking(0)
r = socket.socket()
try:
r.connect((localaddress, port))
except:
import traceback
ei = sys.exc_info()
ei = traceback.format_exception_only(ei[0], ei[1])
ei = ''.join(ei).strip()
log.warning("makePinger: connect exception:\n" + ei)
return False
t = time.time() + 2
while time.time() < t:
rlist, wlist,elist = select.select([l], [], [l], 2)
if len(elist):
log.warning("makePinger: socket error in select()")
return False
if len(rlist) != 0:
break
else:
log.warning("makePinger: socket didn't connect")
return False
try:
w, addr = l.accept()
except:
return False
#w.setblocking(0)
if addr != r.getsockname():
log.info("makePinger: pair didn't connect to each other!")
return False
r.setblocking(0)
# Turn off Nagle
r.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
w.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
return (r, w)
# Try a few times
for i in range(0, 3):
result = tryConnect()
if result is not False:
return SocketPinger(result)
raise RuntimeError("Could not allocate a local socket pair")
makePinger = make_pinger # Deprecated
def is_subclass (cls, classinfo):
"""
A more sensible version of the issubclass builtin
"""
try:
return issubclass(cls, classinfo)
except TypeError:
return False
def str_to_bool (s):
"""
Given a string, parses out whether it is meant to be True or not
"""
s = str(s).lower() # Make sure
if s in ['true', 't', 'yes', 'y', 'on', 'enable', 'enabled', 'ok',
'okay', '1', 'allow', 'allowed']:
return True
try:
r = 10
if s.startswith("0x"):
s = s[2:]
r = 16
i = int(s, r)
if i != 0:
return True
except:
pass
return False
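# A few illustrative results (assumed inputs, not from the original source):
#   str_to_bool("Yes")  -> True
#   str_to_bool("0x10") -> True   (nonzero hex literal)
#   str_to_bool("0")    -> False
#   str_to_bool("nope") -> False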
def hexdump (data):
"""
Converts raw data to a hex dump
"""
if isinstance(data, (str,bytes)):
data = [ord(c) for c in data]
o = ""
def chunks (data, length):
return (data[i:i+length] for i in xrange(0, len(data), length))
def filt (c):
if c >= 32 and c <= 126: return chr(c)
return '.'
for i,chunk in enumerate(chunks(data,16)):
if i > 0: o += "\n"
o += "%04x: " % (i * 16,)
l = ' '.join("%02x" % (c,) for c in chunk)
l = "%-48s" % (l,)
l = l[:3*8-1] + " " + l[3*8:]
t = ''.join([filt(x) for x in chunk])
l += ' |%-16s|' % (t,)
o += l
return o
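# Rough illustration (spacing approximate, not copied from real output):
#   hexdump("ABC") yields a single line beginning "0000: 41 42 43" and ending
#   with the printable-character column "|ABC             |".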
def connect_socket_with_backoff (address, port, max_backoff_seconds=32):
"""
Attempt to connect to the given address and port.
If the connection attempt fails, exponentially back off, up to the maximum.
return the connected socket, or raise an exception if the connection
was unsuccessful by the time the maximum was reached.
Note: blocks while connecting.
"""
#TODO: Remove? The backoff IOWorker seems like a better way to do this
# in general.
backoff_seconds = 1
sock = None
print("connect_socket_with_backoff(address=%s, port=%d)"
% (address, port), file=sys.stderr)
while True:
try:
sock = socket.socket()
sock.connect( (address, port) )
break
except socket.error as e:
print("%s. Backing off %d seconds ..." % (str(e), backoff_seconds),
file=sys.stderr)
if backoff_seconds >= max_backoff_seconds:
raise RuntimeError("Could not connect to controller %s:%d"
% (address, port))
else:
time.sleep(backoff_seconds)
backoff_seconds <<= 1
return sock
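# Minimal usage sketch (hypothetical address and port):
#   sock = connect_socket_with_backoff("127.0.0.1", 6633)   # blocks while retrying
#   sock.close()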
_scalar_types = (int, long, basestring, float, bool)
def is_scalar (v):
"""
Is the given value a scalar-like object?
"""
return isinstance(v, _scalar_types)
def is_listlike (o):
"""
Is this a sequence that isn't like a string or bytes?
"""
if isinstance(o, (bytes,str,bytearray)): return False
return isinstance(o, collections.Iterable)
def fields_of (obj, primitives_only=False,
primitives_and_composites_only=False, allow_caps=False,
ignore=set()):
"""
Returns key/value pairs of things that seem like public fields of an object.
"""
#NOTE: The above docstring isn't split into two lines on purpose.
#NOTE: See Python builtin vars().
r = {}
for k in dir(obj):
if k.startswith('_'): continue
if k in ignore: continue
v = getattr(obj, k)
if hasattr(v, '__call__'): continue
if not allow_caps and k.upper() == k: continue
if primitives_only:
if not isinstance(v, _scalar_types):
continue
elif primitives_and_composites_only:
if not isinstance(v, (int, long, basestring, float, bool, set,
dict, list)):
continue
#r.append((k,v))
r[k] = v
return r
def eval_args (f):
"""
A decorator which causes arguments to be interpreted as Python literals
This isn't a generic decorator, but is specifically meant for POX component
launch functions -- the actual magic is in POX's boot code.
The intention is for launch function/commandline arguments (normally all
strings) to easily receive other types.
"""
f._pox_eval_args = True
return f
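# Hypothetical component launch function showing the intent (not part of POX itself):
#   @eval_args
#   def launch (count=1, verbose=False):
#     ...
# With eval_args, arguments such as "count=3 verbose=True" would be interpreted as
# the Python values 3 and True rather than as strings.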
if __name__ == "__main__":
#TODO: move to tests?
def cb (t,k,v): print(v)
l = DirtyList([10,20,30,40,50])
l.callback = cb
l.append(3)
print(l)
|
|
# Copyright (c) 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import random
import shutil
import string
import tempfile
from unittest import mock
import yaml
import zipfile
from murano.packages import exceptions
from murano.packages import load_utils
import murano.tests.unit.base as test_base
class TestLoadUtils(test_base.MuranoTestCase):
def setUp(cls):
super(TestLoadUtils, cls).setUp()
cls.temp_directories = []
cls.temp_files = []
def _create_temp_dir(self):
temp_directory = tempfile.mkdtemp()
self.temp_directories.append(temp_directory)
return temp_directory
def _create_temp_file(self):
temp_file = tempfile.NamedTemporaryFile(delete=True)
self.temp_files.append(temp_file)
return temp_file
def _create_temp_zip_file(self, zip_path, manifest_path,
arcname='manifest.yaml'):
zip_ = zipfile.ZipFile(zip_path, 'w')
zip_.write(manifest_path, arcname=arcname)
zip_.close()
self.temp_files.append(zip_)
return zip_
def tearDown(cls):
super(TestLoadUtils, cls).tearDown()
for directory in cls.temp_directories:
if os.path.isdir(directory):
shutil.rmtree(directory)
for file in cls.temp_files:
if isinstance(file, zipfile.ZipFile):
if zipfile.is_zipfile(file.filename):
                    os.remove(file.filename)
else:
if os.path.isfile(file.name):
os.remove(file.name)
def _test_load_from_file(self, target_dir=None, drop_dir=True):
manifest_file_contents = dict(
Format='MuranoPL/1.1',
FullName='test_full_name',
Type='Application',
Description='test_description',
Author='test_author',
Supplier='test_supplier',
Tags=[]
)
test_directory = self._create_temp_dir()
manifest_path = os.path.join(test_directory, 'manifest.yaml')
zip_path = os.path.join(test_directory, 'test_zip_load_utils.zip')
with open(manifest_path, 'w') as manifest_file:
yaml.dump(manifest_file_contents, manifest_file,
default_flow_style=True)
self._create_temp_zip_file(zip_path, manifest_path)
with load_utils.load_from_file(archive_path=zip_path,
target_dir=target_dir,
drop_dir=drop_dir) as plugin:
self.assertEqual('MuranoPL', plugin.format_name)
self.assertEqual('1.1.0', str(plugin.runtime_version))
self.assertEqual(manifest_file_contents['FullName'],
plugin.full_name)
self.assertEqual(manifest_file_contents['Description'],
plugin.description)
self.assertEqual(manifest_file_contents['Author'],
plugin.author)
self.assertEqual(manifest_file_contents['Supplier'],
plugin.supplier)
self.assertEqual(manifest_file_contents['Tags'],
plugin.tags)
def test_load_from_file(self):
self._test_load_from_file(target_dir=None, drop_dir=True)
def test_load_from_file_with_custom_target_directory(self):
target_dir = self._create_temp_dir()
self._test_load_from_file(target_dir=target_dir, drop_dir=True)
@mock.patch('murano.packages.load_utils.get_plugin_loader')
def test_load_from_file_with_invalid_handler(self, mock_plugin_loader):
mock_plugin_loader().get_package_handler = mock.MagicMock(
return_value=None)
test_format = 'Invalid Format'
manifest_file_contents = dict(
Format=test_format,
FullName='test_full_name',
Type='Application',
Description='test_description',
Author='test_author',
Supplier='test_supplier',
Tags=[]
)
test_directory = self._create_temp_dir()
target_dir = self._create_temp_dir()
manifest_path = os.path.join(test_directory, 'manifest.yaml')
zip_path = os.path.join(test_directory, 'test_zip_load_utils.zip')
with open(manifest_path, 'w') as manifest_file:
yaml.dump(manifest_file_contents, manifest_file,
default_flow_style=True)
self._create_temp_zip_file(zip_path, manifest_path)
expected_error_msg = "Unsupported format {0}".format(test_format)
with self.assertRaisesRegex(exceptions.PackageLoadError,
expected_error_msg):
with load_utils.load_from_file(archive_path=zip_path,
target_dir=target_dir,
drop_dir=True):
pass
mock_plugin_loader().get_package_handler.assert_called_once_with(
test_format)
def test_load_from_file_with_invalid_archive_path(self):
expected_error_msg = "Unable to find package file"
with self.assertRaisesRegex(exceptions.PackageLoadError,
expected_error_msg):
with load_utils.load_from_file('invalid file path'):
pass
@mock.patch('murano.packages.load_utils.os')
def test_load_from_file_with_nonempty_target_directory(self, mock_os):
mock_os.listdir = mock.MagicMock(return_value=True)
temp_file = self._create_temp_file()
expected_error_msg = "Target directory is not empty"
with self.assertRaisesRegex(exceptions.PackageLoadError,
expected_error_msg):
this_dir = os.path.dirname(os.path.realpath(__file__))
with load_utils.load_from_file(temp_file.name,
target_dir=this_dir):
pass
def test_load_from_file_without_zip_file(self):
temp_file = self._create_temp_file()
expected_error_msg = "Uploaded file {0} is not a zip archive".\
format(temp_file.name)
with self.assertRaisesRegex(exceptions.PackageLoadError,
expected_error_msg):
with load_utils.load_from_file(temp_file.name):
pass
@mock.patch('murano.packages.load_utils.zipfile')
def test_load_from_file_handle_value_error(self, mock_zipfile):
test_error_msg = 'Random error message.'
expected_error_msg = "Couldn't load package from file: {0}".\
format(test_error_msg)
mock_zipfile.is_zipfile = mock.MagicMock(
side_effect=ValueError(test_error_msg))
temp_file = self._create_temp_file()
with self.assertRaisesRegex(exceptions.PackageLoadError,
expected_error_msg):
with load_utils.load_from_file(temp_file.name):
pass
mock_zipfile.is_zipfile.assert_called_once_with(
temp_file.name)
def test_load_from_dir_without_source_directory(self):
expected_error_msg = 'Invalid package directory'
with self.assertRaisesRegex(exceptions.PackageLoadError,
expected_error_msg):
load_utils.load_from_dir('random_test_directory')
def test_load_from_dir_with_invalid_source_directory(self):
source_directory = self._create_temp_dir()
expected_error_msg = 'Unable to find package manifest'
with self.assertRaisesRegex(exceptions.PackageLoadError,
expected_error_msg):
load_utils.load_from_dir(source_directory)
@mock.patch('murano.packages.load_utils.os.path.isfile')
def test_load_from_dir_open_file_negative(self, mock_isfile):
mock_isfile.return_value = True
source_directory = self._create_temp_dir()
random_filename = ''.join(random.choice(string.ascii_lowercase)
for i in range(20))
expected_error_msg = 'Unable to load due to'
with self.assertRaisesRegex(exceptions.PackageLoadError,
expected_error_msg):
load_utils.load_from_dir(source_directory,
filename=random_filename)
|
|
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Local directory-specific hook implementations.
Since this file is located at the root of all ensembl-compara tests, every test in every subfolder will have
access to the plugins, hooks and fixtures defined here.
"""
# Disable all the redefined-outer-name violations due to how pytest fixtures work
# pylint: disable=redefined-outer-name
from contextlib import ExitStack
import os
from pathlib import Path
import shutil
import time
from typing import Any, Callable, Dict, Generator, Optional
import pytest
from _pytest.config import Config
from _pytest.config.argparsing import Parser
from _pytest.fixtures import FixtureRequest
from _pytest.python_api import RaisesContext
from _pytest.tmpdir import TempPathFactory
import sqlalchemy
from ensembl.compara.db import UnitTestDB
from ensembl.compara.filesys import DirCmp, PathLike
def pytest_addoption(parser: Parser) -> None:
"""Registers argparse-style options for Compara's unit testing.
`Pytest initialisation hook
<https://docs.pytest.org/en/latest/reference.html#_pytest.hookspec.pytest_addoption>`_.
Args:
parser: Parser for command line arguments and ini-file values.
"""
# Add the Compara unitary test parameters to pytest parser
group = parser.getgroup("compara unit testing")
group.addoption('--server', action='store', metavar='URL', dest='server', required=True,
help="URL to the server where to create the test database(s).")
group.addoption('--keep-data', action='store_true', dest='keep_data',
help="Do not remove test databases/temporary directories. Default: False")
def pytest_configure(config: Config) -> None:
"""Adds global variables and configuration attributes required by Compara's unit tests.
`Pytest initialisation hook
<https://docs.pytest.org/en/latest/reference.html#_pytest.hookspec.pytest_configure>`_.
Args:
config: Access to configuration values, pluginmanager and plugin hooks.
"""
# Load server information
server_url = sqlalchemy.engine.url.make_url(config.getoption('server'))
# If password starts with "$", treat it as an environment variable that needs to be resolved
if server_url.password and server_url.password.startswith('$'):
server_url.password = os.environ[server_url.password[1:]]
config.option.server = str(server_url)
# Add global variables
pytest.dbs_dir = Path(__file__).parent / 'databases'
pytest.files_dir = Path(__file__).parent / 'flatfiles'
def pytest_make_parametrize_id(val: Any) -> str:
"""Returns a readable string representation of `val` that will be used by @pytest.mark.parametrize calls.
`Pytest collection hook
<https://docs.pytest.org/en/latest/reference.html#_pytest.hookspec.pytest_make_parametrize_id>`_.
Args:
val: The parametrized value.
"""
if isinstance(val, ExitStack):
return 'No error'
if isinstance(val, RaisesContext):
return val.expected_exception.__name__
return str(val)
@pytest.fixture(name='db_factory', scope='session')
def db_factory_(request: FixtureRequest) -> Generator:
"""Yields a unit test database (:class:`UnitTestDB`) factory.
Args:
request: Access to the requesting test context.
"""
created = {} # type: Dict[str, UnitTestDB]
server_url = request.config.getoption('server')
def db_factory(src: PathLike, name: Optional[str] = None) -> UnitTestDB:
"""Returns a unit test database (:class:`UnitTestDB`) object.
Args:
src: Directory path where the test database schema and content files are located. If a relative
path is provided, the root folder will be ``src/python/tests/databases``.
name: Name to give to the new database. See :meth:`UnitTestDB.__init__()` for more information.
"""
src_path = Path(src) if os.path.isabs(src) else pytest.dbs_dir / src
db_key = name if name else src_path.name
return created.setdefault(db_key, UnitTestDB(server_url, src_path, name))
yield db_factory
# Drop all unit test databases unless the user has requested to keep them
if not request.config.getoption('keep_data'):
for test_db in created.values():
test_db.drop()
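# Typical use inside a test function (illustrative, the database name is hypothetical):
#   def test_something(db_factory):
#       db = db_factory('master', name='compara_master_test')
#       ...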
@pytest.fixture(scope='session')
def database(request: FixtureRequest, db_factory: Callable) -> Generator:
"""Returns a unit test database (:class:`UnitTestDB`) object.
Requires a dictionary with keys `src` (mandatory) and `name` (optional) passed via `request.param`. See
:meth:`db_factory()` for details about each key's value. This fixture is a wrapper of :meth:`db_factory()`
intended to be used via indirect parametrization, for example::
@pytest.mark.parametrize("database", [{'src': 'master'}], indirect=True)
def test_method(..., database: UnitTestDB, ...):
Args:
request: Access to the requesting test context.
"""
return db_factory(request.param['src'], request.param.get('name', None))
@pytest.fixture(scope='session')
def multi_dbs(request: FixtureRequest, db_factory: Callable) -> Dict:
"""Returns a dictionary of unit test database (:class:`UnitTestDB`) objects with the database name as key.
Requires a list of dictionaries, each with keys `src` (mandatory) and `name` (optional), passed via
`request.param`. See :meth:`db_factory()` for details about each key's value. This fixture is a wrapper of
:meth:`db_factory()` intended to be used via indirect parametrization, for example::
@pytest.mark.parametrize("multi_dbs", [[{'src': 'master'}, {'src': 'master', 'name': 'master2'}]],
indirect=True)
def test_method(..., multi_dbs: Dict[str, UnitTestDB], ...):
Args:
request: Access to the requesting test context.
"""
databases = {}
for element in request.param:
src = Path(element['src'])
name = element.get('name', None)
key = name if name else src.name
databases[key] = db_factory(src, name)
return databases
@pytest.fixture(scope='session')
def tmp_dir(request: FixtureRequest, tmp_path_factory: TempPathFactory) -> Generator:
"""Yields a :class:`Path` object pointing to a newly created temporary directory.
Args:
request: Access to the requesting test context.
tmp_path_factory: Session-scoped fixture that creates arbitrary temporary directories.
"""
tmpdir = tmp_path_factory.mktemp(f"compara_{request.node.name}")
yield tmpdir
# Delete the temporary directory unless the user has requested to keep it
if not request.config.getoption("keep_data"):
shutil.rmtree(tmpdir)
@pytest.fixture(scope='session')
def dir_cmp(request: FixtureRequest, tmp_dir: Path) -> DirCmp:
"""Returns a directory tree comparison (:class:`DirCmp`) object.
Requires a dictionary with the following keys:
ref (:obj:`PathLike`): Reference root directory path.
target (:obj:`PathLike`): Target root directory path.
passed via `request.param`. In both cases, if a relative path is provided, the starting folder will be
``src/python/tests/flatfiles``. This fixture is intended to be used via indirect parametrization, for
example::
@pytest.mark.parametrize("dir_cmp", [{'ref': 'citest/reference', 'target': 'citest/target'}],
indirect=True)
def test_method(..., dir_cmp: DirCmp, ...):
Args:
request: Access to the requesting test context.
tmp_dir: Temporary directory path.
"""
# Get the source and temporary absolute paths for reference and target root directories
ref = Path(request.param['ref'])
ref_src = ref if ref.is_absolute() else pytest.files_dir / ref
ref_tmp = tmp_dir / str(ref).replace(os.path.sep, '_')
target = Path(request.param['target'])
target_src = target if target.is_absolute() else pytest.files_dir / target
target_tmp = tmp_dir / str(target).replace(os.path.sep, '_')
# Copy directory trees (if they have not been copied already) ignoring file metadata
if not ref_tmp.exists():
shutil.copytree(ref_src, ref_tmp, copy_function=shutil.copy)
# Sleep one second in between to ensure the timestamp differs between reference and target files
time.sleep(1)
if not target_tmp.exists():
shutil.copytree(target_src, target_tmp, copy_function=shutil.copy)
return DirCmp(ref_tmp, target_tmp)
|
|
#!/usr/bin/env python3
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for the client_replay.py script.
To run tests, just call this script with the intended chrome, chromedriver
binaries and an optional test filter. This file interfaces with the
client_replay mainly using the CommandSequence class. Each of the test cases
matches a test case from chromedriver/test/run_py_tests.py; they run the same
case from run_py_tests.py, then replay the log and check that the behavior
matches.
"""
# pylint: disable=g-import-not-at-top, g-bad-import-order
import json
import optparse
import os
import re
import shutil
import subprocess
import sys
import tempfile
import traceback
import unittest
import client_replay
_THIS_DIR = os.path.abspath(os.path.dirname(__file__))
_PARENT_DIR = os.path.join(_THIS_DIR, os.pardir)
_TEST_DIR = os.path.join(_PARENT_DIR, "test")
_PY_TESTS = os.path.join(_TEST_DIR, "run_py_tests.py")
sys.path.insert(1, _PARENT_DIR)
import chrome_paths
import util
sys.path.remove(_PARENT_DIR)
sys.path.insert(1, _TEST_DIR)
import unittest_util
import webserver
sys.path.remove(_TEST_DIR)
# pylint: enable=g-import-not-at-top, g-bad-import-order
_NEGATIVE_FILTER = []
def SubstituteVariableEntries(s):
"""Identifies and removes items that can legitimately vary between runs."""
white_list = r'(("(id|userDataDir|frameId|version' \
r'|element-6066-11e4-a52e-4f735466cecf|message|timestamp' \
r'|expiry|chromedriverVersion|sessionId)": ' \
r'("[0-9]\.[0-9]*(\.[0-9]*)? \([a-f0-9]*\)"|[^\s},]*))' \
r'|CDwindow-[A-F0-9]*|cd_frame_id_="[a-f0-9]*")'
return re.sub(white_list, "<variable_item>", s)
def ClearPort(s):
"""Removes port numbers from urls in the given string."""
s = re.sub(r":([0-9]){5}/", "<port>", s)
return re.sub(r"localhost:([0-9]*)", "localhost:<port>", s)
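# Illustrative behaviour (made-up URL): ports are masked so that logs from
# different runs compare equal, e.g.
#   ClearPort("http://localhost:8081/page.html") -> "http://localhost:<port>/page.html"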
def GenerateTestLog(test_name, chromedriver_path, chrome_path, log_dir):
"""Place the ChromeDriver log from running |test_name| in |log_dir|.
The log file is put in |log_dir| and named |test_name|.log.
Args:
test_name: test name from run_py_tests.py. Example: testGetTitle
chromedriver_path: path to ChromeDriver binary
chrome_path: path to Chrome binary
log_dir: directory in which to put the ChromeDriver log file.
Raises:
RuntimeError: run_py_tests.py had a test failure or other error.
"""
args = [
sys.executable,
_PY_TESTS,
"--chromedriver=%s" % chromedriver_path,
"--chrome=%s" % chrome_path,
"--replayable=true",
"--log-path=%s" % log_dir,
"--filter=%s" % ("*" + test_name)
]
result = subprocess.call(args)
if result != 0:
raise RuntimeError("run_py_tests.py could not be run or failed.")
class ChromeDriverClientReplayTest(unittest.TestCase):
"""Base class for test cases."""
def __init__(self, *args, **kwargs):
super(ChromeDriverClientReplayTest, self).__init__(*args, **kwargs)
@classmethod
def setUpClass(cls):
"""Starts the server for the necessary pages for testing."""
# make a temp dir to place test logs into
cls.log_dir = tempfile.mkdtemp()
try:
cls.http_server = webserver.WebServer(chrome_paths.GetTestData())
cls.server_url = cls.http_server.GetUrl()
except Exception:
cls.tearDownClass()
raise
@classmethod
def tearDownClass(cls):
"""Tears down the server."""
shutil.rmtree(cls.log_dir)
if getattr(cls, 'http_server', False):
cls.http_server.Shutdown()
def CheckResponsesMatch(self, real, logged):
"""Asserts that the given pair of responses match.
These are usually the replay response and the logged response.
Checks that they match, up to differences in session, window, element
IDs, timestamps, etc.
Args:
real: actual response from running the command
logged: logged response, taken from the log file
"""
if not real and not logged:
return
if isinstance(real, dict) and "message" in real:
real = "ERROR " + real["message"].split("\n")[0]
# pylint: disable=unidiomatic-typecheck
self.assertTrue(type(logged) == type(real)
or (isinstance(real, str)
and isinstance(logged, str)))
# pylint: enable=unidiomatic-typecheck
if isinstance(real, str) \
and (real[:14] == "<!DOCTYPE html" or real[:5] == "<html"):
real = "".join(real.split())
logged = "".join(logged.split())
if not isinstance(real, str):
real = json.dumps(real)
logged = json.dumps(logged)
real = ClearPort(real)
logged = ClearPort(logged)
real = SubstituteVariableEntries(real)
logged = SubstituteVariableEntries(logged)
self.assertEqual(real, logged)
def runTest(self, test_name):
"""Runs the test. Compares output from Chrome to the output in the log file.
Args:
test_name: name of the test to run from run_py_tests.py.
"""
log_file = os.path.join(ChromeDriverClientReplayTest.log_dir,
test_name + ".log")
GenerateTestLog(test_name, _CHROMEDRIVER, _CHROME, log_file)
with open(log_file) as lf:
replay_path = log_file if _OPTIONS.devtools_replay else ""
server = client_replay.StartChromeDriverServer(_CHROMEDRIVER,
_OPTIONS.output_log_path,
replay_path)
chrome_binary = (util.GetAbsolutePathOfUserPath(_CHROME)
if _CHROME else None)
replayer = client_replay.Replayer(lf, server, chrome_binary,
self.server_url)
real_response = None
while True:
command = replayer.command_sequence.NextCommand(real_response)
if not command:
break
logged_response = replayer.command_sequence._last_response
real_response = replayer.executor.Execute(
client_replay._COMMANDS[command.name],
command.GetPayloadPrimitive())
self.CheckResponsesMatch(real_response["value"],
logged_response.GetPayloadPrimitive())
server.Kill()
def GetFunctionName(self):
"""Get the name of the function that calls this one."""
# https://stackoverflow.com/questions/251464/
# how-to-get-a-function-name-as-a-string-in-python
return traceback.extract_stack(None, 2)[0][2]
def testGetPageSource(self):
self.runTest(self.GetFunctionName())
def testCloseWindow(self):
self.runTest(self.GetFunctionName())
def testIFrameWithExtensionsSource(self):
self.runTest(self.GetFunctionName())
def testUnexpectedAlertBehaviour(self):
self.runTest(self.GetFunctionName())
def testFileDownloadWithClick(self):
self.runTest(self.GetFunctionName())
def testCanSwitchToPrintPreviewDialog(self):
self.runTest(self.GetFunctionName())
def testClearElement(self):
self.runTest(self.GetFunctionName())
def testEmulateNetworkConditions(self):
self.runTest(self.GetFunctionName())
def testSwitchToWindow(self):
self.runTest(self.GetFunctionName())
def testEvaluateScript(self):
self.runTest(self.GetFunctionName())
def testEvaluateInvalidScript(self):
self.runTest(self.GetFunctionName())
def testGetTitle(self):
self.runTest(self.GetFunctionName())
def testSendCommand(self):
self.runTest(self.GetFunctionName())
def testGetSessions(self):
self.runTest(self.GetFunctionName())
def testQuitASessionMoreThanOnce(self):
self.runTest(self.GetFunctionName())
def GetNegativeFilter(chrome_version=None):
  """Construct the appropriate negative test filter for the given Chrome version."""
if _NEGATIVE_FILTER:
return "*-" + ":__main__.".join([""] + _NEGATIVE_FILTER)
return "*"
def main():
usage = "usage: %prog <chromedriver binary> <chrome binary> [options]"
parser = optparse.OptionParser(usage=usage)
parser.add_option(
"", "--output-log-path",
help="Output verbose server logs to this file")
parser.add_option(
"", "--filter", type="string", default="*",
help="Filter for specifying what tests to run, \"*\" will run all,"
"including tests excluded by default. E.g., *testRunMethod")
parser.add_option(
"", "--devtools-replay", help="Replay DevTools instead of using\n"
"real Chrome.")
# Need global to access these from the test runner.
# pylint: disable=global-variable-undefined
global _OPTIONS, _CHROMEDRIVER, _CHROME
# pylint: enable=global-variable-undefined
_OPTIONS, args = parser.parse_args()
_CHROMEDRIVER = util.GetAbsolutePathOfUserPath(args[0])
_CHROME = util.GetAbsolutePathOfUserPath(args[1])
if not os.path.exists(_CHROMEDRIVER):
parser.error("Path given for chromedriver is invalid.\n"
'Please run "%s --help" for help' % __file__)
if not os.path.exists(_CHROME):
parser.error("Path given for chrome is invalid.\n"
'Please run "%s --help" for help' % __file__)
all_tests_suite = unittest.defaultTestLoader.loadTestsFromModule(
sys.modules[__name__])
test_filter = (GetNegativeFilter()
if not _OPTIONS.filter else _OPTIONS.filter)
tests = unittest_util.FilterTestSuite(all_tests_suite, test_filter)
result = unittest.TextTestRunner(stream=sys.stdout, verbosity=2).run(tests)
sys.exit(len(result.failures) + len(result.errors))
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python
import logging
import unittest
import MySQLdb
import environment
import tablet
import utils
use_mysqlctld = True
tablet_master = tablet.Tablet(use_mysqlctld=use_mysqlctld)
tablet_replica1 = tablet.Tablet(use_mysqlctld=use_mysqlctld)
tablet_replica2 = tablet.Tablet(use_mysqlctld=use_mysqlctld)
setup_procs = []
def setUpModule():
try:
environment.topo_server().setup()
# start mysql instance external to the test
global setup_procs
setup_procs = [
tablet_master.init_mysql(),
tablet_replica1.init_mysql(),
tablet_replica2.init_mysql(),
]
if use_mysqlctld:
tablet_master.wait_for_mysqlctl_socket()
tablet_replica1.wait_for_mysqlctl_socket()
tablet_replica2.wait_for_mysqlctl_socket()
else:
utils.wait_procs(setup_procs)
except:
tearDownModule()
raise
def tearDownModule():
utils.required_teardown()
if utils.options.skip_teardown:
return
if use_mysqlctld:
# Try to terminate mysqlctld gracefully, so it kills its mysqld.
for proc in setup_procs:
utils.kill_sub_process(proc, soft=True)
teardown_procs = setup_procs
else:
teardown_procs = [
tablet_master.teardown_mysql(),
tablet_replica1.teardown_mysql(),
tablet_replica2.teardown_mysql(),
]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
tablet_master.remove_tree()
tablet_replica1.remove_tree()
tablet_replica2.remove_tree()
class TestBackup(unittest.TestCase):
def setUp(self):
for t in tablet_master, tablet_replica1:
t.create_db('vt_test_keyspace')
tablet_master.init_tablet('replica', 'test_keyspace', '0', start=True,
supports_backups=True)
tablet_replica1.init_tablet('replica', 'test_keyspace', '0', start=True,
supports_backups=True)
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
tablet_master.tablet_alias])
def tearDown(self):
for t in tablet_master, tablet_replica1:
t.kill_vttablet()
tablet.Tablet.check_vttablet_count()
environment.topo_server().wipe()
for t in [tablet_master, tablet_replica1, tablet_replica2]:
t.reset_replication()
t.set_semi_sync_enabled(master=False)
t.clean_dbs()
for backup in self._list_backups():
self._remove_backup(backup)
_create_vt_insert_test = '''create table vt_insert_test (
id bigint auto_increment,
msg varchar(64),
primary key (id)
) Engine=InnoDB'''
def _insert_data(self, t, index):
"""Add a single row with value 'index' to the given tablet."""
t.mquery(
'vt_test_keyspace',
"insert into vt_insert_test (msg) values ('test %s')" %
index, write=True)
def _check_data(self, t, count, msg):
"""Check that the specified tablet has the expected number of rows."""
timeout = 10
while True:
try:
result = t.mquery(
'vt_test_keyspace', 'select count(*) from vt_insert_test')
if result[0][0] == count:
break
except MySQLdb.DatabaseError:
# ignore exceptions, we'll just timeout (the tablet creation
# can take some time to replicate, and we get a 'table vt_insert_test
        # does not exist' exception in some rare cases)
logging.exception('exception waiting for data to replicate')
timeout = utils.wait_step(msg, timeout)
def _restore(self, t):
"""Erase mysql/tablet dir, then start tablet with restore enabled."""
self._reset_tablet_dir(t)
t.start_vttablet(wait_for_state='SERVING',
init_tablet_type='replica',
init_keyspace='test_keyspace',
init_shard='0',
supports_backups=True)
def _reset_tablet_dir(self, t):
"""Stop mysql, delete everything including tablet dir, restart mysql."""
utils.wait_procs([t.teardown_mysql()])
t.remove_tree()
proc = t.init_mysql()
if use_mysqlctld:
t.wait_for_mysqlctl_socket()
else:
utils.wait_procs([proc])
def _list_backups(self):
"""Get a list of backup names for the test shard."""
backups, _ = utils.run_vtctl(tablet.get_backup_storage_flags() +
['ListBackups', 'test_keyspace/0'],
mode=utils.VTCTL_VTCTL, trap_output=True)
return backups.splitlines()
def _remove_backup(self, backup):
"""Remove a named backup from the test shard."""
utils.run_vtctl(
tablet.get_backup_storage_flags() +
['RemoveBackup', 'test_keyspace/0', backup],
auto_log=True, mode=utils.VTCTL_VTCTL)
def test_backup(self):
"""Test backup flow.
test_backup will:
- create a shard with master and replica1 only
- run InitShardMaster
- insert some data
- take a backup
- insert more data on the master
- bring up tablet_replica2 after the fact, let it restore the backup
- check all data is right (before+after backup data)
- list the backup, remove it
"""
# insert data on master, wait for slave to get it
tablet_master.mquery('vt_test_keyspace', self._create_vt_insert_test)
self._insert_data(tablet_master, 1)
self._check_data(tablet_replica1, 1, 'replica1 tablet getting data')
# backup the slave
utils.run_vtctl(['Backup', tablet_replica1.tablet_alias], auto_log=True)
# insert more data on the master
self._insert_data(tablet_master, 2)
# now bring up the other slave, letting it restore from backup.
self._restore(tablet_replica2)
# check the new slave has the data
self._check_data(tablet_replica2, 2, 'replica2 tablet getting data')
# check that the restored slave has the right local_metadata
result = tablet_replica2.mquery('_vt', 'select * from local_metadata')
metadata = {}
for row in result:
metadata[row[0]] = row[1]
self.assertEqual(metadata['Alias'], 'test_nj-0000062346')
self.assertEqual(metadata['ClusterAlias'], 'test_keyspace.0')
self.assertEqual(metadata['DataCenter'], 'test_nj')
self.assertEqual(metadata['PromotionRule'], 'neutral')
# check that the backup shows up in the listing
backups = self._list_backups()
logging.debug('list of backups: %s', backups)
self.assertEqual(len(backups), 1)
self.assertTrue(backups[0].endswith(tablet_replica1.tablet_alias))
# remove the backup and check that the list is empty
self._remove_backup(backups[0])
backups = self._list_backups()
logging.debug('list of backups after remove: %s', backups)
self.assertEqual(len(backups), 0)
tablet_replica2.kill_vttablet()
def test_master_slave_same_backup(self):
"""Test a master and slave from the same backup.
Check that a slave and master both restored from the same backup
can replicate successfully.
"""
# insert data on master, wait for slave to get it
tablet_master.mquery('vt_test_keyspace', self._create_vt_insert_test)
self._insert_data(tablet_master, 1)
self._check_data(tablet_replica1, 1, 'replica1 tablet getting data')
# backup the slave
utils.run_vtctl(['Backup', tablet_replica1.tablet_alias], auto_log=True)
# insert more data on the master
self._insert_data(tablet_master, 2)
# now bring up the other slave, letting it restore from backup.
self._restore(tablet_replica2)
# check the new slave has the data
self._check_data(tablet_replica2, 2, 'replica2 tablet getting data')
# Promote replica2 to master.
utils.run_vtctl(['PlannedReparentShard',
'-keyspace_shard', 'test_keyspace/0',
'-new_master', tablet_replica2.tablet_alias])
# insert more data on replica2 (current master)
self._insert_data(tablet_replica2, 3)
# Force replica1 to restore from backup.
tablet_replica1.kill_vttablet()
self._restore(tablet_replica1)
# wait for replica1 to catch up.
self._check_data(tablet_replica1, 3,
'replica1 getting data from restored master')
tablet_replica2.kill_vttablet()
def _restore_old_master_test(self, restore_method):
"""Test that a former master replicates correctly after being restored.
- Take a backup.
- Reparent from old master to new master.
- Force old master to restore from a previous backup using restore_method.
Args:
restore_method: function accepting one parameter of type tablet.Tablet,
this function is called to force a restore on the provided tablet
"""
# insert data on master, wait for slave to get it
tablet_master.mquery('vt_test_keyspace', self._create_vt_insert_test)
self._insert_data(tablet_master, 1)
self._check_data(tablet_replica1, 1, 'replica1 tablet getting data')
# backup the slave
utils.run_vtctl(['Backup', tablet_replica1.tablet_alias], auto_log=True)
# insert more data on the master
self._insert_data(tablet_master, 2)
# reparent to replica1
utils.run_vtctl(['PlannedReparentShard',
'-keyspace_shard', 'test_keyspace/0',
'-new_master', tablet_replica1.tablet_alias])
# insert more data on new master
self._insert_data(tablet_replica1, 3)
# force the old master to restore at the latest backup.
restore_method(tablet_master)
# wait for it to catch up.
self._check_data(tablet_master, 3, 'former master catches up after restore')
def test_restore_old_master(self):
def _restore_using_kill(t):
t.kill_vttablet()
self._restore(t)
self._restore_old_master_test(_restore_using_kill)
def test_in_place_restore(self):
def _restore_in_place(t):
utils.run_vtctl(['RestoreFromBackup', t.tablet_alias], auto_log=True)
self._restore_old_master_test(_restore_in_place)
if __name__ == '__main__':
utils.main()
|
|
from copy import deepcopy
from collections import defaultdict
import logging
import os
from urlparse import urlparse
from lxml import etree
import requests
from regparser.notice.address import fetch_addresses
from regparser.notice.build_appendix import parse_appendix_changes
from regparser.notice.build_interp import parse_interp_changes
from regparser.notice.diff import parse_amdpar, find_section, find_subpart
from regparser.notice.diff import new_subpart_added
from regparser.notice.diff import DesignateAmendment
from regparser.notice.dates import fetch_dates
from regparser.notice.sxs import find_section_by_section
from regparser.notice.sxs import build_section_by_section
from regparser.notice.util import spaces_then_remove, swap_emphasis_tags
from regparser.notice import changes
from regparser.tree import struct
from regparser.tree.xml_parser import reg_text
from regparser.grammar.unified import notice_cfr_p
import settings
def build_notice(cfr_title, cfr_part, fr_notice, do_process_xml=True):
""" Given JSON from the federal register, create our notice structure """
logging.info('building notice, title {0}, part {1}, notice {2}'.format(
cfr_title, cfr_part, fr_notice['document_number']))
cfr_parts = set(str(ref['part']) for ref in fr_notice['cfr_references'])
cfr_parts.add(cfr_part)
notice = {'cfr_title': cfr_title,
'cfr_parts': list(cfr_parts),
'cfr_part': cfr_part}
notice_number = fr_notice['document_number']
# Check for configured overrides of the FR JSON for this notice
if notice_number in settings.FR_NOTICE_OVERRIDES:
logging.warning("overriding FR for {}".format(notice_number))
notice_overrides = settings.FR_NOTICE_OVERRIDES[notice_number]
for k, v in notice_overrides.iteritems():
fr_notice[k] = v
# Copy over most fields
for field in ['abstract', 'action', 'agency_names', 'comments_close_on',
'document_number', 'publication_date',
'regulation_id_numbers']:
if fr_notice[field]:
notice[field] = fr_notice[field]
if fr_notice['effective_on']:
notice['effective_on'] = fr_notice['effective_on']
notice['initial_effective_on'] = fr_notice['effective_on']
if fr_notice['html_url']:
notice['fr_url'] = fr_notice['html_url']
if fr_notice['citation']:
notice['fr_citation'] = fr_notice['citation']
notice['fr_volume'] = fr_notice['volume']
notice['meta'] = {}
for key in ('dates', 'end_page', 'start_page', 'type'):
notice['meta'][key] = fr_notice[key]
if fr_notice['full_text_xml_url'] and do_process_xml:
local_notices = _check_local_version_list(
fr_notice['full_text_xml_url'])
if len(local_notices) > 0:
logging.warning("using local xml for %s",
fr_notice['full_text_xml_url'])
return process_local_notices(local_notices, notice)
else:
logging.warning("fetching notice %s",
fr_notice['full_text_xml_url'])
notice_str = requests.get(fr_notice['full_text_xml_url']).content
return [process_notice(notice, notice_str)]
return [notice]
def split_doc_num(doc_num, effective_date):
""" If we have a split notice, we construct a document number
based on the original document number and the effective date. """
effective_date = ''.join(effective_date.split('-'))
return '%s_%s' % (doc_num, effective_date)
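# Illustrative example (hypothetical document number):
#   split_doc_num('2013-12345', '2014-01-01') -> '2013-12345_20140101'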
def process_local_notices(local_notices, partial_notice):
""" If we have any local notices, process them. Note that this takes into
account split notices (a single notice split into two because of different
    effective dates)."""
notices = []
if len(local_notices) > 1:
# If the notice is split, pick up the effective date and the
# CFR parts from the XML
partial_notice['effective_on'] = None
partial_notice['cfr_parts'] = None
for local_notice_file in local_notices:
with open(local_notice_file, 'r') as f:
notice = process_notice(partial_notice, f.read())
notices.append(notice)
notices = set_document_numbers(notices)
return notices
def set_document_numbers(notices):
""" If we have multiple notices, we need to fix their document
numbers. """
if len(notices) > 1:
for notice in notices:
notice['document_number'] = split_doc_num(
notice['document_number'], notice['effective_on'])
return notices
def process_notice(partial_notice, notice_str):
notice_xml = etree.fromstring(notice_str)
notice = dict(partial_notice)
notice_xml = preprocess_notice_xml(notice_xml)
process_xml(notice, notice_xml)
return notice
def _check_local_version_list(url):
"""Use any local copies (potentially with modifications of the FR XML)"""
parsed_url = urlparse(url)
path = parsed_url.path.replace('/', os.sep)
notice_dir_suffix, file_name = os.path.split(path)
for xml_path in settings.LOCAL_XML_PATHS:
if os.path.isfile(xml_path + path):
return [xml_path + path]
else:
notice_directory = xml_path + notice_dir_suffix
if os.path.exists(notice_directory):
notices = os.listdir(notice_directory)
prefix = file_name.split('.')[0]
relevant_notices = [os.path.join(notice_directory, n)
for n in notices if n.startswith(prefix)]
return relevant_notices
return []
def process_designate_subpart(amendment):
""" Process the designate amendment if it adds a subpart. """
if 'Subpart' in amendment.destination:
subpart_changes = {}
for label in amendment.labels:
label_id = '-'.join(label)
subpart_changes[label_id] = {
'action': 'DESIGNATE', 'destination': amendment.destination}
return subpart_changes
def process_new_subpart(notice, amd_label, par):
""" A new subpart has been added, create the notice changes. """
subpart_changes = {}
subpart_xml = find_subpart(par)
subpart = reg_text.build_subpart(amd_label.label[0], subpart_xml)
for change in changes.create_subpart_amendment(subpart):
subpart_changes.update(change)
return subpart_changes
def create_xmlless_changes(amended_labels, notice_changes):
"""Deletes, moves, and the like do not have an associated XML structure.
Add their changes"""
amend_map = changes.match_labels_and_changes(amended_labels, None)
for label, amendments in amend_map.iteritems():
for amendment in amendments:
if amendment['action'] == 'DELETE':
notice_changes.update({label: {'action': amendment['action']}})
elif amendment['action'] == 'MOVE':
change = {'action': amendment['action']}
destination = [d for d in amendment['destination'] if d != '?']
change['destination'] = destination
notice_changes.update({label: change})
elif amendment['action'] not in ('POST', 'PUT', 'RESERVE'):
logging.info('NOT HANDLED: %s' % amendment['action'])
def create_xml_changes(amended_labels, section, notice_changes,
subpart_label=None):
"""For PUT/POST, match the amendments to the section nodes that got
parsed, and actually create the notice changes. """
def per_node(node):
node.child_labels = [c.label_id() for c in node.children]
struct.walk(section, per_node)
amend_map = changes.match_labels_and_changes(amended_labels, section)
for label, amendments in amend_map.iteritems():
for amendment in amendments:
if amendment['action'] in ('POST', 'PUT'):
if (subpart_label and amendment['action'] == 'POST'
and len(label.split('-')) == 2):
amendment['extras'] = {'subpart': subpart_label}
if 'field' in amendment:
nodes = changes.create_field_amendment(label, amendment)
else:
nodes = changes.create_add_amendment(amendment)
for n in nodes:
notice_changes.update(n)
elif amendment['action'] == 'RESERVE':
change = changes.create_reserve_amendment(amendment)
notice_changes.update(change)
elif amendment['action'] not in ('DELETE', 'MOVE'):
logging.info('NOT HANDLED: %s' % amendment['action'])
class AmdparByParent(object):
"""Not all AMDPARs have a single REGTEXT/etc. section associated with them,
particularly for interpretations/appendices. This simple class wraps those
fields"""
def __init__(self, parent, first_amdpar):
self.parent = parent
self.amdpars = [first_amdpar]
def append(self, next_amdpar):
self.amdpars.append(next_amdpar)
def preprocess_notice_xml(notice_xml):
"""Unfortunately, the notice xml is often inaccurate. This function
attempts to fix some of those (general) flaws. For specific issues, we
tend to instead use the files in settings.LOCAL_XML_PATHS"""
notice_xml = deepcopy(notice_xml) # We will be destructive
# Last amdpar in a section; probably meant to add the amdpar to the
# next section
for amdpar in notice_xml.xpath("//AMDPAR"):
if amdpar.getnext() is None:
parent = amdpar.getparent()
next_parent = parent.getnext()
if (next_parent is not None
and parent.get('PART') == next_parent.get('PART')):
parent.remove(amdpar)
next_parent.insert(0, amdpar)
# Supplement I AMDPARs are often incorrect (labelled as Ps)
xpath_contains_supp = "contains(., 'Supplement I')"
xpath = "//REGTEXT//HD[@SOURCE='HD1' and %s]" % xpath_contains_supp
for supp_header in notice_xml.xpath(xpath):
parent = supp_header.getparent()
if (parent.xpath("./AMDPAR[%s]" % xpath_contains_supp)
or parent.xpath("./P[%s]" % xpath_contains_supp)):
pred = supp_header.getprevious()
while pred is not None:
if pred.tag not in ('P', 'AMDPAR'):
pred = pred.getprevious()
else:
pred.tag = 'AMDPAR'
if 'supplement i' in pred.text.lower():
pred = None
else:
pred = pred.getprevious()
# Clean up emphasized paragraph tags
for par in notice_xml.xpath("//P/*[position()=1 and name()='E']/.."):
em = par.getchildren()[0] # must be an E from the xpath
# wrap in a thunk to delay execution
par_text = lambda: par.text or ""
em_text, em_tail = lambda: em.text or "", lambda: em.tail or ""
par_open = par_text()[-1:] == "("
em_open = em_text()[:1] == "("
em_txt_closed = em_text()[-1:] == ")"
em_tail_closed = em_tail()[:1] == ")"
if (par_open or em_open) and (em_txt_closed or em_tail_closed):
if not par_open and em_open: # Move '(' out
par.text = par_text() + "("
em.text = em_text()[1:]
if not em_tail_closed and em_txt_closed: # Move ')' out
em.text = em_text()[:-1]
em.tail = ")" + em_tail()
return notice_xml
def process_amendments(notice, notice_xml):
""" Process the changes to the regulation that are expressed in the notice.
"""
amends = []
notice_changes = changes.NoticeChanges()
amdpars_by_parent = []
for par in notice_xml.xpath('//AMDPAR'):
parent = par.getparent()
exists = filter(lambda aXp: aXp.parent == parent, amdpars_by_parent)
if exists:
exists[0].append(par)
else:
amdpars_by_parent.append(AmdparByParent(parent, par))
default_cfr_part = notice['cfr_part']
for aXp in amdpars_by_parent:
amended_labels = []
designate_labels, other_labels = [], []
context = [default_cfr_part]
for par in aXp.amdpars:
als, context = parse_amdpar(par, context)
amended_labels.extend(als)
labels_by_part = defaultdict(list)
for al in amended_labels:
if isinstance(al, DesignateAmendment):
subpart_changes = process_designate_subpart(al)
if subpart_changes:
notice_changes.update(subpart_changes)
designate_labels.append(al)
elif new_subpart_added(al):
notice_changes.update(process_new_subpart(notice, al, par))
designate_labels.append(al)
else:
other_labels.append(al)
labels_by_part[al.label[0]].append(al)
create_xmlless_changes(other_labels, notice_changes)
# for cfr_part, rel_labels in labels_by_part.iteritems():
labels_for_part = {part: labels
for part, labels in labels_by_part.iteritems()
if part == default_cfr_part}
for cfr_part, rel_labels in labels_for_part.iteritems():
section_xml = find_section(par)
if section_xml is not None:
subparts = aXp.parent.xpath('.//SUBPART/HD')
if subparts:
subpart_label = [cfr_part, 'Subpart',
subparts[0].text[8:9]]
else:
subpart_label = None
for section in reg_text.build_from_section(cfr_part,
section_xml):
create_xml_changes(rel_labels, section, notice_changes,
subpart_label)
for appendix in parse_appendix_changes(rel_labels, cfr_part,
aXp.parent):
create_xml_changes(rel_labels, appendix, notice_changes)
interp = parse_interp_changes(rel_labels, cfr_part, aXp.parent)
if interp:
create_xml_changes(rel_labels, interp, notice_changes)
amends.extend(designate_labels)
amends.extend(other_labels)
# if other_labels: # Carry cfr_part through amendments
# default_cfr_part = other_labels[-1].label[0]
if amends:
notice['amendments'] = amends
notice['changes'] = notice_changes.changes
elif notice['document_number'] in settings.REISSUANCES:
notice['changes'] = {
default_cfr_part: [{
'action': 'PUT',
'node': reg_text.build_tree(notice_xml)
}]
}
def process_sxs(notice, notice_xml):
""" Find and build SXS from the notice_xml. """
sxs = find_section_by_section(notice_xml)
# note we will continue to use cfr_parts[0] as the default SxS label until
# we find a counter example
sxs = build_section_by_section(sxs, notice['meta']['start_page'],
notice['cfr_parts'][0])
notice['section_by_section'] = sxs
def fetch_cfr_parts(notice_xml):
""" Sometimes we need to read the CFR part numbers from the notice
XML itself. This would need to happen when we've broken up a
multiple-effective-date notice that has multiple CFR parts that
may not be included in each date. """
cfr_elm = notice_xml.xpath('//CFR')[0]
results = notice_cfr_p.parseString(cfr_elm.text)
return list(results)
def process_xml(notice, notice_xml):
"""Pull out relevant fields from the xml and add them to the notice"""
xml_chunk = notice_xml.xpath('//FURINF/P')
if xml_chunk:
notice['contact'] = xml_chunk[0].text
addresses = fetch_addresses(notice_xml)
if addresses:
notice['addresses'] = addresses
if not notice.get('effective_on'):
dates = fetch_dates(notice_xml)
if dates and 'effective' in dates:
notice['effective_on'] = dates['effective'][0]
if not notice.get('cfr_parts'):
cfr_parts = fetch_cfr_parts(notice_xml)
notice['cfr_parts'] = cfr_parts
process_sxs(notice, notice_xml)
process_amendments(notice, notice_xml)
add_footnotes(notice, notice_xml)
return notice
def add_footnotes(notice, notice_xml):
""" Parse the notice xml for footnotes and add them to the notice. """
notice['footnotes'] = {}
for child in notice_xml.xpath('//FTNT/*'):
spaces_then_remove(child, 'PRTPAGE')
swap_emphasis_tags(child)
ref = child.xpath('.//SU')
if ref:
child.text = ref[0].tail
child.remove(ref[0])
content = child.text
for cc in child:
content += etree.tostring(cc)
content += child.tail
notice['footnotes'][ref[0].text] = content.strip()
|
|
#!/usr/bin/env python3
# Copyright (c) 2009-2019 The Bitcoin Core developers
# Copyright (c) 2014-2019 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RBF code."""
from decimal import Decimal
from test_framework.messages import COIN, COutPoint, CTransaction, CTxIn, CTxOut
from test_framework.script import CScript, OP_DROP
from test_framework.test_framework import DigiByteTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, bytes_to_hex_str, satoshi_round
MAX_REPLACEMENT_LIMIT = 100
def txToHex(tx):
return bytes_to_hex_str(tx.serialize())
def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])):
"""Create a txout with a given amount and scriptPubKey
Mines coins as needed.
confirmed - txouts created will be confirmed in the blockchain;
unconfirmed otherwise.
"""
fee = 1*COIN
while node.getbalance() < satoshi_round((amount + fee)/COIN):
node.generate(100)
new_addr = node.getnewaddress()
txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN))
tx1 = node.getrawtransaction(txid, 1)
txid = int(txid, 16)
i = None
for i, txout in enumerate(tx1['vout']):
if txout['scriptPubKey']['addresses'] == [new_addr]:
break
assert i is not None
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(txid, i))]
tx2.vout = [CTxOut(amount, scriptPubKey)]
tx2.rehash()
signed_tx = node.signrawtransactionwithwallet(txToHex(tx2))
txid = node.sendrawtransaction(signed_tx['hex'], True)
# If requested, ensure txouts are confirmed.
if confirmed:
mempool_size = len(node.getrawmempool())
while mempool_size > 0:
node.generate(1)
new_size = len(node.getrawmempool())
# Error out if we have something stuck in the mempool, as this
# would likely be a bug.
assert(new_size < mempool_size)
mempool_size = new_size
return COutPoint(int(txid, 16), 0)
class ReplaceByFeeTest(DigiByteTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [
[
"-maxorphantx=1000",
"-whitelist=127.0.0.1",
"-limitancestorcount=50",
"-limitancestorsize=101",
"-limitdescendantcount=200",
"-limitdescendantsize=101",
],
[
"-mempoolreplacement=0",
],
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Leave IBD
self.nodes[0].generate(1)
make_utxo(self.nodes[0], 1*COIN)
# Ensure nodes are synced
self.sync_all()
self.log.info("Running test simple doublespend...")
self.test_simple_doublespend()
self.log.info("Running test doublespend chain...")
self.test_doublespend_chain()
self.log.info("Running test doublespend tree...")
self.test_doublespend_tree()
self.log.info("Running test replacement feeperkb...")
self.test_replacement_feeperkb()
self.log.info("Running test spends of conflicting outputs...")
self.test_spends_of_conflicting_outputs()
self.log.info("Running test new unconfirmed inputs...")
self.test_new_unconfirmed_inputs()
self.log.info("Running test too many replacements...")
self.test_too_many_replacements()
self.log.info("Running test opt-in...")
self.test_opt_in()
self.log.info("Running test RPC...")
self.test_rpc()
self.log.info("Running test prioritised transactions...")
self.test_prioritised_transactions()
self.log.info("Passed")
def test_simple_doublespend(self):
"""Simple doublespend"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# make_utxo may have generated a bunch of blocks, so we need to sync
# before we can spend the coins generated, or else the resulting
# transactions might not be accepted by our peers.
self.sync_all()
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1 * COIN, CScript([b'a' * 35]))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
self.sync_all()
# Should fail because we haven't changed the fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(1 * COIN, CScript([b'b' * 35]))]
tx1b_hex = txToHex(tx1b)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
# This will raise an exception due to transaction replacement being disabled
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True)
# Extra 0.1 DGB fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.9 * COIN), CScript([b'b' * 35]))]
tx1b_hex = txToHex(tx1b)
# Replacement still disabled even with "enough fee"
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True)
# Works when enabled
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
mempool = self.nodes[0].getrawmempool()
assert (tx1a_txid not in mempool)
assert (tx1b_txid in mempool)
assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid))
# Second node is running mempoolreplacement=0, will not replace originally-seen txn
mempool = self.nodes[1].getrawmempool()
assert tx1a_txid in mempool
assert tx1b_txid not in mempool
def test_doublespend_chain(self):
"""Doublespend of a long chain"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
prevout = tx0_outpoint
remaining_value = initial_nValue
chain_txids = []
while remaining_value > 10*COIN:
remaining_value -= 1*COIN
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = [CTxOut(remaining_value, CScript([1, OP_DROP] * 15 + [1]))]
tx_hex = txToHex(tx)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
chain_txids.append(txid)
prevout = COutPoint(int(txid, 16), 0)
# Whether the double-spend is allowed is evaluated by including all
# child fees - 40 DGB - so this attempt is rejected.
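        # (Worked numbers for the chain built above: 40 transactions, each
        # paying 1 DGB in fees, so a replacement must pay more than 40 DGB.
        # This attempt pays only initial_nValue - (initial_nValue - 30) = 30 DGB.)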
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 30 * COIN, CScript([1] * 35))]
dbl_tx_hex = txToHex(dbl_tx)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
# Accepted with sufficient fee
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(1 * COIN, CScript([1] * 35))]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for doublespent_txid in chain_txids:
assert(doublespent_txid not in mempool)
def test_doublespend_tree(self):
"""Doublespend of a big tree of transactions"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None):
if _total_txs is None:
_total_txs = [0]
if _total_txs[0] >= max_txs:
return
txout_value = (initial_value - fee) // tree_width
if txout_value < fee:
return
vout = [CTxOut(txout_value, CScript([i+1]))
for i in range(tree_width)]
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = vout
tx_hex = txToHex(tx)
assert(len(tx.serialize()) < 100000)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
yield tx
_total_txs[0] += 1
txid = int(txid, 16)
for i, txout in enumerate(tx.vout):
for x in branch(COutPoint(txid, i), txout_value,
max_txs,
tree_width=tree_width, fee=fee,
_total_txs=_total_txs):
yield x
fee = int(0.0001*COIN)
n = MAX_REPLACEMENT_LIMIT
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
# Attempt double-spend, will fail because too little fee paid
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee * n, CScript([1] * 35))]
dbl_tx_hex = txToHex(dbl_tx)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
# 1 DGB fee is enough
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee * n - 1 * COIN, CScript([1] * 35))]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for tx in tree_txs:
tx.rehash()
assert (tx.hash not in mempool)
# Try again, but with more total transactions than the "max txs
# double-spent at once" anti-DoS limit.
for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2):
fee = int(0.0001*COIN)
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 2 * fee * n, CScript([1] * 35))]
dbl_tx_hex = txToHex(dbl_tx)
# This will raise an exception
assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
for tx in tree_txs:
tx.rehash()
self.nodes[0].getrawtransaction(tx.hash)
def test_replacement_feeperkb(self):
"""Replacement requires fee-per-KB to be higher"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1 * COIN, CScript([b'a' * 35]))]
tx1a_hex = txToHex(tx1a)
self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the fee per KB is much lower, so the replacement is
# rejected.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*999000]))]
tx1b_hex = txToHex(tx1b)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
def test_spends_of_conflicting_outputs(self):
"""Replacements that spend conflicting tx outputs are rejected"""
utxo1 = make_utxo(self.nodes[0], int(1.2*COIN))
utxo2 = make_utxo(self.nodes[0], 3*COIN)
tx1a = CTransaction()
tx1a.vin = [CTxIn(utxo1, nSequence=0)]
tx1a.vout = [CTxOut(int(1.1 * COIN), CScript([b'a' * 35]))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
tx1a_txid = int(tx1a_txid, 16)
# Direct spend an output of the transaction we're replacing.
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)]
tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0))
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
# This will raise an exception
assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True)
# Spend tx1a's output to test the indirect case.
tx1b = CTransaction()
tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx1b.vout = [CTxOut(1 * COIN, CScript([b'a' * 35]))]
tx1b_hex = txToHex(tx1b)
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
tx1b_txid = int(tx1b_txid, 16)
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
CTxIn(COutPoint(tx1b_txid, 0))]
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
# This will raise an exception
assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True)
def test_new_unconfirmed_inputs(self):
"""Replacements that add new unconfirmed inputs are rejected"""
confirmed_utxo = make_utxo(self.nodes[0], int(1.1*COIN))
unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1*COIN), False)
tx1 = CTransaction()
tx1.vin = [CTxIn(confirmed_utxo)]
tx1.vout = [CTxOut(1 * COIN, CScript([b'a' * 35]))]
tx1_hex = txToHex(tx1)
self.nodes[0].sendrawtransaction(tx1_hex, True)
tx2 = CTransaction()
tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)]
tx2.vout = tx1.vout
tx2_hex = txToHex(tx2)
# This will raise an exception
assert_raises_rpc_error(-26, "replacement-adds-unconfirmed", self.nodes[0].sendrawtransaction, tx2_hex, True)
def test_too_many_replacements(self):
"""Replacements that evict too many transactions are rejected"""
# Try directly replacing more than MAX_REPLACEMENT_LIMIT
# transactions
# Start by creating a single transaction with many outputs
initial_nValue = 10*COIN
utxo = make_utxo(self.nodes[0], initial_nValue)
fee = int(0.0001*COIN)
split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1))
outputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
outputs.append(CTxOut(split_value, CScript([1])))
splitting_tx = CTransaction()
splitting_tx.vin = [CTxIn(utxo, nSequence=0)]
splitting_tx.vout = outputs
splitting_tx_hex = txToHex(splitting_tx)
txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True)
txid = int(txid, 16)
# Now spend each of those outputs individually
for i in range(MAX_REPLACEMENT_LIMIT+1):
tx_i = CTransaction()
tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)]
tx_i.vout = [CTxOut(split_value - fee, CScript([b'a' * 35]))]
tx_i_hex = txToHex(tx_i)
self.nodes[0].sendrawtransaction(tx_i_hex, True)
# Now create doublespend of the whole lot; should fail.
# Need a big enough fee to cover all spending transactions and have
# a higher fee rate
double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1)
inputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
inputs.append(CTxIn(COutPoint(txid, i), nSequence=0))
double_tx = CTransaction()
double_tx.vin = inputs
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
# This will raise an exception
assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, double_tx_hex, True)
# If we remove an input, it should pass
double_tx = CTransaction()
double_tx.vin = inputs[0:-1]
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
self.nodes[0].sendrawtransaction(double_tx_hex, True)
def test_opt_in(self):
"""Replacing should only work if orig tx opted in"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# Create a non-opting in transaction
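        # (Per BIP125, a transaction signals replaceability only if at least one
        # of its inputs has nSequence below 0xfffffffe; the 0xffffffff here and
        # the 0xfffffffe used further down therefore both leave RBF disabled.)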
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)]
tx1a.vout = [CTxOut(1 * COIN, CScript([b'a' * 35]))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Shouldn't be able to double-spend
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.9 * COIN), CScript([b'b' * 35]))]
tx1b_hex = txToHex(tx1b)
# This will raise an exception
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx1b_hex, True)
tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# Create a different non-opting in transaction
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)]
tx2a.vout = [CTxOut(1 * COIN, CScript([b'a' * 35]))]
tx2a_hex = txToHex(tx2a)
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
# Still shouldn't be able to double-spend
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(int(0.9 * COIN), CScript([b'b' * 35]))]
tx2b_hex = txToHex(tx2b)
# This will raise an exception
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx2b_hex, True)
# Now create a new transaction that spends from tx1a and tx2a
# opt-in on one of the inputs
# Transaction should be replaceable on either input
tx1a_txid = int(tx1a_txid, 16)
tx2a_txid = int(tx2a_txid, 16)
tx3a = CTransaction()
tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff),
CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)]
tx3a.vout = [CTxOut(int(0.9*COIN), CScript([b'c'])), CTxOut(int(0.9*COIN), CScript([b'd']))]
tx3a_hex = txToHex(tx3a)
self.nodes[0].sendrawtransaction(tx3a_hex, True)
tx3b = CTransaction()
tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx3b.vout = [CTxOut(int(0.5 * COIN), CScript([b'e' * 35]))]
tx3b_hex = txToHex(tx3b)
tx3c = CTransaction()
tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)]
tx3c.vout = [CTxOut(int(0.5 * COIN), CScript([b'f' * 35]))]
tx3c_hex = txToHex(tx3c)
self.nodes[0].sendrawtransaction(tx3b_hex, True)
# If tx3b was accepted, tx3c won't look like a replacement,
# but make sure it is accepted anyway
self.nodes[0].sendrawtransaction(tx3c_hex, True)
def test_prioritised_transactions(self):
# Ensure that fee deltas used via prioritisetransaction are
# correctly used by replacement logic
# 1. Check that feeperkb uses modified fees
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1 * COIN, CScript([b'a' * 35]))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the actual fee per KB is much lower.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*740000]))]
tx1b_hex = txToHex(tx1b)
# Verify tx1b cannot replace tx1a.
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
# Use prioritisetransaction to set tx1a's fee to 0.
self.nodes[0].prioritisetransaction(txid=tx1a_txid, fee_delta=int(-0.1*COIN))
# Now tx1b should be able to replace tx1a
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
assert(tx1b_txid in self.nodes[0].getrawmempool())
# 2. Check that absolute fee checks use modified fee.
tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2a.vout = [CTxOut(1 * COIN, CScript([b'a' * 35]))]
tx2a_hex = txToHex(tx2a)
self.nodes[0].sendrawtransaction(tx2a_hex, True)
# Lower fee, but we'll prioritise it
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(int(1.01 * COIN), CScript([b'a' * 35]))]
tx2b.rehash()
tx2b_hex = txToHex(tx2b)
# Verify tx2b cannot replace tx2a.
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx2b_hex, True)
# Now prioritise tx2b to have a higher modified fee
self.nodes[0].prioritisetransaction(txid=tx2b.hash, fee_delta=int(0.1*COIN))
# tx2b should now be accepted
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
assert(tx2b_txid in self.nodes[0].getrawmempool())
def test_rpc(self):
us0 = self.nodes[0].listunspent()[0]
ins = [us0]
outs = {self.nodes[0].getnewaddress() : Decimal(1.0000000)}
rawtx0 = self.nodes[0].createrawtransaction(ins, outs, 0, True)
rawtx1 = self.nodes[0].createrawtransaction(ins, outs, 0, False)
json0 = self.nodes[0].decoderawtransaction(rawtx0)
json1 = self.nodes[0].decoderawtransaction(rawtx1)
assert_equal(json0["vin"][0]["sequence"], 4294967293)
assert_equal(json1["vin"][0]["sequence"], 4294967295)
rawtx2 = self.nodes[0].createrawtransaction([], outs)
frawtx2a = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": True})
frawtx2b = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": False})
json0 = self.nodes[0].decoderawtransaction(frawtx2a['hex'])
json1 = self.nodes[0].decoderawtransaction(frawtx2b['hex'])
assert_equal(json0["vin"][0]["sequence"], 4294967293)
assert_equal(json1["vin"][0]["sequence"], 4294967294)
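        # (For reference: 4294967293 == 0xfffffffd signals BIP125 opt-in, while
        # 4294967295 == 0xffffffff and 4294967294 == 0xfffffffe do not.)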
if __name__ == '__main__':
ReplaceByFeeTest().main()
|
|
#!/usr/bin/env python
from __future__ import print_function, unicode_literals
import getpass
import argparse
import json
import sys
import datetime
import codecs
from contextlib import closing
from egnyte import client, configuration, exc, base
parser_kwargs = dict(formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=50))
def create_main_parser():
main = argparse.ArgumentParser(prog="python -m egnyte", **parser_kwargs)
main.add_argument("-c", "--config-path", help="Path to config file")
main.add_argument('-v', '--verbose', action='count', dest='verbosity', help="Be more verbose. Can be repeated for debugging", default=0)
main.add_argument('--impersonate', metavar="USERNAME", help="Impersonate another user (username or email)", default=None)
subparsers = main.add_subparsers()
parser_config = subparsers.add_parser('config', help='commands related to configuration', **parser_kwargs)
subparsers_config = parser_config.add_subparsers()
parser_config_show = subparsers_config.add_parser('show', help="show configuration", **parser_kwargs)
parser_config_show.set_defaults(command="config_show")
parser_config_create = subparsers_config.add_parser('create', help='create a new configuration file', **parser_kwargs)
parser_config_create.set_defaults(command="config_create")
parser_config_update = subparsers_config.add_parser('update', help='update a configuration file', **parser_kwargs)
parser_config_update.set_defaults(command="config_update")
parser_config_token = subparsers_config.add_parser('token', help='generate a new access token and store it in config file', **parser_kwargs)
parser_config_token.set_defaults(command="config_token")
parser_token = subparsers.add_parser('token', help='generate a new access token and print it', **parser_kwargs)
parser_token.set_defaults(command='token')
parser_test = subparsers.add_parser('test', help='test if config is correct (connects to service)', **parser_kwargs)
parser_test.set_defaults(command='test')
for parser, required in [(parser_config_create, True), (parser_config_update, False), (parser_token, False)]:
parser.add_argument('-d', '--domain', required=required, help='domain name')
parser.add_argument('-l', '--login', required=False, help='login')
parser.add_argument('-p', '--password', required=False, help='password')
parser.add_argument('-k', '--key', dest='api_key', required=required, help='API key')
for parser in (parser_config_create, parser_config_update):
parser.add_argument('-t', '--token', dest='access_token', required=False, help='API access token')
parser.add_argument('-T', '--timeout', required=False, help='Request timeout')
# Audit generator
parser_audit = subparsers.add_parser('audit', help='generate audit reports', **parser_kwargs)
subparsers_audit = parser_audit.add_subparsers()
parser_audit_files = subparsers_audit.add_parser('files', help="create Files report", **parser_kwargs)
parser_audit_files.set_defaults(command="audit_files")
parser_audit_logins = subparsers_audit.add_parser('logins', help="create Logins report", **parser_kwargs)
parser_audit_logins.set_defaults(command="audit_logins")
parser_audit_permissions = subparsers_audit.add_parser('permissions', help="create Permissions report", **parser_kwargs)
parser_audit_permissions.set_defaults(command="audit_permissions")
parser_audit_get = subparsers_audit.add_parser('get', help="get a previously generated report", **parser_kwargs)
parser_audit_get.set_defaults(command="audit_get")
# Common options
for parser in (parser_audit_files, parser_audit_logins, parser_audit_permissions, parser_audit_get):
        parser.add_argument('--save', required=False, default=None, help="File to save the report to (default is standard output)")
for parser in (parser_audit_files, parser_audit_logins, parser_audit_permissions):
parser.add_argument('--format', required=False, default="csv", help="Report type (json or csv. Default is csv)")
parser.add_argument('--start', required=False, default='yesterday', help='Start date (YYYY-MM-DD)')
parser.add_argument('--end', required=False, default='today', help='End date (YYYY-MM-DD)')
parser_audit_files.add_argument('--folder', required=False, action='append', default=None, help="Absolute folder path for the destination folder. 'folder' or 'file' is required. Can be used multiple times")
parser_audit_files.add_argument('--file', required=False, default=None, help="Absolute folder path for the destination file, wildcards allowed. 'folder' or 'file' is required")
parser_audit_files.add_argument('--users', required=False, default=None, help='Users to report on (comma separated list, default is all)')
parser_audit_files.add_argument('--transaction_type', required=False, default=None, help="""
Transaction type: upload, download, preview, delete, copy, move, restore_trash, delete_trash, create_link, delete_link, download_link
    (comma separated list, default is all)""")
parser_audit_logins.add_argument('--events', required=True, help="Event types: logins, logouts, account_lockouts, password_resets, failed_attempts (comma separated list)")
parser_audit_logins.add_argument('--access-points', required=False, default=None, help="Access points to cover: Web, FTP, Mobile (comma separated list, default is all)")
parser_audit_logins.add_argument('--users', required=False, default=None, help='Users to report on (comma separated list, default is all)')
parser_audit_permissions.add_argument('--assigners', required=True, help='Permission assigners (comma separated list)')
parser_audit_permissions.add_argument('--folder', required=True, action='append', default=None, help="Absolute folder path for the destination folder. Can be used multiple times")
parser_audit_permissions.add_argument('--users', required=False, default=None, help='Users to report on (comma separated list)')
parser_audit_permissions.add_argument('--groups', required=False, default=None, help='Groups to report on (comma separated list)')
parser_audit_get.add_argument('--id', type=int, required=True, help="Id of the report")
parser_upload = subparsers.add_parser('upload', help='send files to Egnyte', **parser_kwargs)
parser_upload.set_defaults(command="upload")
    parser_upload.add_argument('paths', nargs='+', help="Paths (files or directories) to upload")
parser_upload.add_argument('target', help="Path in Cloud File System to upload to")
parser_upload.add_argument('-x', '--exclude', action='append', default=None, help='Exclude items that match this glob pattern')
parser_download = subparsers.add_parser('download', help='download files from Egnyte', **parser_kwargs)
parser_download.set_defaults(command="download")
    parser_download.add_argument('paths', nargs='+', help="Paths (files or directories) to download")
parser_download.add_argument('--target', help="Local directory to put downloaded files and directories in", default='.')
parser_download.add_argument('--overwrite', action='store_const', const=True, default=False, help="Delete local files and directories that conflict with cloud content")
parser_settings = subparsers.add_parser('settings', help='show domain settings', **parser_kwargs)
parser_settings.set_defaults(command="settings")
parser_search = subparsers.add_parser('search', help='search for files', **parser_kwargs)
parser_search.set_defaults(command="search")
parser_search.add_argument('query', help='Search query')
    parser_search.add_argument('--mtime_from', help="Minimum modification date", default=None)
parser_search.add_argument('--mtime_to', help="Maximum modification date", default=None)
parser_search.add_argument('--folder', help="Limit search to a specified folder", default=None)
parser_events = subparsers.add_parser('events', help='show events from the domain', **parser_kwargs)
parser_events.set_defaults(command="events")
    parser_events.add_argument('--start', type=int, help="Starting event id. Default or 0 - last seen event. Negative numbers are counted backwards from the last event", default=None)
    parser_events.add_argument('--stop', type=int, help="Stop event id. Default - poll indefinitely. 0 means last event. Negative numbers are counted backwards from the last event", default=None)
parser_events.add_argument('--type', action='append', help="Limit to events of specific type", default=None)
    parser_events.add_argument('--folder', help="Limit to events in a specific folder and its subfolders", default=None)
parser_events.add_argument('--suppress', help="Skip events caused by this app or user. Valid values: app, user.", default=None)
return main
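# Illustrative invocations of the parser built above (domain, credentials and
# paths are hypothetical placeholders):
#
#   python -m egnyte config create -d example.egnyte.com -l jdoe -k MY_API_KEY
#   python -m egnyte upload ./reports /Shared/reports
#   python -m egnyte audit logins --events logins,failed_attempts --start 2015-01-01 --end 2015-01-31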
def to_json(obj):
return {k:v for (k, v) in obj.__dict__.items() if not k.startswith('_')}
class Commands(object):
_config = None
config_keys = ('login', 'password', 'domain', 'api_key', 'access_token', 'timeout')
STATUS_CMD_NOT_FOUND = 1
STATUS_API_ERROR = 2
INFO = 1
DEBUG = 2
def load_config(self):
if self._config is None:
self._config = configuration.load(self.args.config_path)
return self._config
def save_config(self):
return configuration.save(self.config, self.args.config_path)
config = property(load_config)
def __init__(self, args):
self.args = args
@property
def info(self):
"""If verbosity is INFO or better"""
return self.args.verbosity >= self.INFO
@property
def debug(self):
"""If verbosity is INFO or better"""
return self.args.verbosity >= self.DEBUG
def run(self):
if not hasattr(self.args, 'command'):
print("Use -h or --help for help")
return
        method = getattr(self, "cmd_%s" % self.args.command, None)
        if method is None:
            print("Command '%s' not implemented yet" % self.args.command.replace('_', ' '))
            return self.STATUS_CMD_NOT_FOUND
        if self.debug:
            print("running %s" % method.__name__)
try:
return method()
except exc.EgnyteError as e:
if self.debug:
raise
print(repr(e))
return self.STATUS_API_ERROR
def get_client(self):
result = client.EgnyteClient(self.config)
if self.args.impersonate is not None:
result.impersonate(self.args.impersonate)
return result
def get_access_token(self):
config = self.require_password()
return base.get_access_token(config)
def merge_config(self):
"""Merge loaded config with command line params"""
for key in self.config_keys:
if getattr(self.args, key, None) is not None:
self.config[key] = getattr(self.args, key)
def require_password(self):
"""If config does not contain a password, ask user for it, but don't store it"""
if self.config['password']:
return self.config
else:
config = self.config.copy()
config['password'] = getpass.getpass("Enter the password: ")
return config
def print_json(self, obj):
print(json.dumps(obj, indent=2, sort_keys=True, default=to_json))
def cmd_config_show(self):
self.print_json(self.config)
def cmd_config_create(self):
self._config = {}
self.merge_config()
self.save_config()
def cmd_config_update(self):
self.merge_config()
self.save_config()
def cmd_config_token(self):
self.config['access_token'] = self.get_access_token()
self.save_config()
def cmd_token(self):
self.merge_config()
print(self.get_access_token())
def cmd_test(self):
api = self.get_client()
info = api.user_info
print("Connection successful for user %s" % (info['username'],))
def cmd_search(self):
api = self.get_client()
results = api.search.files(self.args.query, modified_before=self.args.mtime_to, modified_after=self.args.mtime_from, folder=self.args.folder)
self.print_json(results)
def common_audit_args(self):
format = self.args.format
date_start = self.date(self.args.start)
date_end = self.date(self.args.end)
return (format, date_start, date_end)
def date(self, value):
"""Poor mans human readable dates"""
if value == 'today':
return datetime.date.today()
elif value == 'yesterday':
return datetime.date.today() - datetime.timedelta(days=1)
else:
            return datetime.datetime.strptime(value, "%Y-%m-%d").date()
def wait_and_save_report(self, report):
if self.args.save:
            output = open(self.args.save, "wb")
            print("Opened %s for writing, requesting report" % self.args.save)
with closing(output):
report.wait()
report.download().write_to(output)
else:
report.wait()
download = report.download()
with closing(download):
lines = codecs.iterdecode(iter(download), 'UTF-8')
for line in lines:
print(line)
def comma_split(self, param):
value = getattr(self.args, param, None)
if value:
return value.split(',')
def cmd_audit_get(self):
api = self.get_client()
audits = api.audits
report = audits.get(id=self.args.id)
return self.wait_and_save_report(report)
def cmd_audit_files(self):
api = self.get_client()
audits = api.audits
folders = getattr(self.args, 'folder', None)
file = self.args.file
users = self.comma_split('users')
transaction_type = self.comma_split('transaction_type')
report = audits.files(*self.common_audit_args(), folders=folders, file=file, users=users, transaction_type=transaction_type)
return self.wait_and_save_report(report)
def cmd_audit_permissions(self):
api = self.get_client()
audits = api.audits
        assigners = self.comma_split('assigners')
folders = self.args.folder
users = self.comma_split('users')
groups = self.comma_split('groups')
report = audits.permissions(*self.common_audit_args(), assigners=assigners, folders=folders, users=users, groups=groups)
return self.wait_and_save_report(report)
def cmd_audit_logins(self):
api = self.get_client()
audits = api.audits
users = self.comma_split('users')
events = self.comma_split('events')
access_points = self.comma_split('access_points')
report = audits.logins(*self.common_audit_args(), events=events, access_points=access_points, users=users)
return self.wait_and_save_report(report)
def transfer_callbacks(self):
if self.info:
if sys.stdout.isatty():
result = TerminalCallbacks()
if self.debug:
result.force_newline = True
return result
else:
return VerboseCallbacks()
def cmd_upload(self):
api = self.get_client()
api.bulk_upload(self.args.paths, self.args.target, self.args.exclude, self.transfer_callbacks())
def cmd_download(self):
api = self.get_client()
api.bulk_download(self.args.paths, self.args.target, self.args.overwrite, self.transfer_callbacks())
def cmd_settings(self):
self.print_json(self.get_client().settings)
def cmd_events(self):
start = self.args.start
stop = self.args.stop
events = self.get_client().events
if start is None:
start = events.latest_event_id
elif start <= 0:
start = events.latest_event_id + start
if stop is not None and stop <= 0:
stop = events.latest_event_id + stop
events = events.filter(start_id = start, suppress=self.args.suppress, folder=self.args.folder, types=self.args.type or None)
try:
for event in events:
self.print_json(event)
print()
if stop is not None and event.id >= stop:
break
except KeyboardInterrupt:
pass
class VerboseCallbacks(client.ProgressCallbacks):
"""Progress callbacks used when sys.stdout is a file or a pipe"""
def write(self, text, force_newline=False):
print(text)
def getting_info(self, cloud_path):
self.write("Getting info about %s" % cloud_path)
def got_info(self, cloud_obj):
self.write("Got info about %s" % cloud_obj.path)
def download_start(self, local_path, cloud_file, size):
self.write("Downloading %s" % local_path)
self.current = local_path
def upload_start(self, local_path, cloud_file, size):
self.write("Uploading %s" % local_path)
self.current = local_path
def creating_directory(self, cloud_folder):
self.write("Creating directory %s" % cloud_folder.path)
def skipped(self, cloud_obj, reason):
self.write("Skipped %s: %s" % (cloud_obj.path, reason), force_newline=True)
def finished(self):
self.write("Finished", force_newline=True)
class TerminalCallbacks(VerboseCallbacks):
"""Progress callbacks used when sys.stdout is a terminal"""
force_newline = False
def __init__(self):
self.last_len = 0
def write(self, text, force_newline=None):
if force_newline is None:
force_newline = self.force_newline
output = ["\r"]
sys.stdout.write("\r") # return the carret
if len(text) < self.last_len: # clear out previous text
sys.stdout.write(' ' * self.last_len)
sys.stdout.write("\r") # return the carret
output.append(text)
if force_newline:
output.append('\n')
sys.stdout.write("".join(output))
sys.stdout.flush()
self.last_len = len(text)
def download_progress(self, cloud_file, size, downloaded):
self.write("Downloading %s, %d%% complete" % (self.current, (downloaded * 100) / size))
def upload_progress(self, cloud_file, size, uploaded):
self.write("Uploading %s, %d%%" % (self.current, (uploaded * 100) / size))
def download_finish(self, cloud_file):
self.write("Downloaded %s" % self.current)
def upload_finish(self, cloud_file):
self.write("Uploaded %s" % self.current)
def main():
parsed = create_main_parser().parse_args()
sys.exit(Commands(parsed).run())
def full_help():
parser = create_main_parser()
return parser.format_help()
if __name__ == '__main__':
main()
|
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import copy
import os
import fixtures
import yaml
from os_client_config import cloud_config
from os_client_config import config
from os_client_config import defaults
from os_client_config import exceptions
from os_client_config.tests import base
class TestConfig(base.TestCase):
def test_get_all_clouds(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
clouds = c.get_all_clouds()
# We add one by hand because the regions cloud is going to exist
# twice since it has two regions in it
user_clouds = [
cloud for cloud in base.USER_CONF['clouds'].keys()
] + ['_test_cloud_regions']
configured_clouds = [cloud.name for cloud in clouds]
self.assertItemsEqual(user_clouds, configured_clouds)
def test_get_one_cloud(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
cloud = c.get_one_cloud()
self.assertIsInstance(cloud, cloud_config.CloudConfig)
self.assertEqual(cloud.name, '')
def test_get_one_cloud_auth_defaults(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml])
cc = c.get_one_cloud(cloud='_test-cloud_', auth={'username': 'user'})
self.assertEqual('user', cc.auth['username'])
self.assertEqual(
defaults._defaults['auth_type'],
cc.auth_type,
)
self.assertEqual(
defaults._defaults['identity_api_version'],
cc.identity_api_version,
)
def test_get_one_cloud_auth_override_defaults(self):
default_options = {'auth_type': 'token'}
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
override_defaults=default_options)
cc = c.get_one_cloud(cloud='_test-cloud_', auth={'username': 'user'})
self.assertEqual('user', cc.auth['username'])
self.assertEqual('token', cc.auth_type)
self.assertEqual(
defaults._defaults['identity_api_version'],
cc.identity_api_version,
)
def test_get_one_cloud_with_config_files(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
self.assertIsInstance(c.cloud_config, dict)
self.assertIn('cache', c.cloud_config)
self.assertIsInstance(c.cloud_config['cache'], dict)
self.assertIn('max_age', c.cloud_config['cache'])
self.assertIn('path', c.cloud_config['cache'])
cc = c.get_one_cloud('_test-cloud_')
self._assert_cloud_details(cc)
cc = c.get_one_cloud('_test_cloud_no_vendor')
self._assert_cloud_details(cc)
def test_get_one_cloud_with_int_project_id(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
cc = c.get_one_cloud('_test-cloud-int-project_')
self.assertEqual('12345', cc.auth['project_id'])
def test_get_one_cloud_with_hyphenated_project_id(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
cc = c.get_one_cloud('_test_cloud_hyphenated')
self.assertEqual('12345', cc.auth['project_id'])
def test_no_environ(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
self.assertRaises(
exceptions.OpenStackConfigException, c.get_one_cloud, 'envvars')
def test_fallthrough(self):
c = config.OpenStackConfig(config_files=[self.no_yaml],
vendor_files=[self.no_yaml])
for k in os.environ.keys():
if k.startswith('OS_'):
self.useFixture(fixtures.EnvironmentVariable(k))
c.get_one_cloud(cloud='defaults')
def test_prefer_ipv6_true(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
cc = c.get_one_cloud(cloud='_test-cloud_')
self.assertTrue(cc.prefer_ipv6)
def test_prefer_ipv6_false(self):
c = config.OpenStackConfig(config_files=[self.no_yaml],
vendor_files=[self.no_yaml])
cc = c.get_one_cloud(cloud='defaults')
self.assertFalse(cc.prefer_ipv6)
def test_get_one_cloud_auth_merge(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml])
cc = c.get_one_cloud(cloud='_test-cloud_', auth={'username': 'user'})
self.assertEqual('user', cc.auth['username'])
self.assertEqual('testpass', cc.auth['password'])
def test_get_cloud_names(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml])
self.assertEqual(
['_test-cloud-int-project_',
'_test-cloud_',
'_test_cloud_hyphenated',
'_test_cloud_no_vendor',
'_test_cloud_regions',
],
sorted(c.get_cloud_names()))
c = config.OpenStackConfig(config_files=[self.no_yaml],
vendor_files=[self.no_yaml])
for k in os.environ.keys():
if k.startswith('OS_'):
self.useFixture(fixtures.EnvironmentVariable(k))
c.get_one_cloud(cloud='defaults')
self.assertEqual(['defaults'], sorted(c.get_cloud_names()))
def test_set_one_cloud_creates_file(self):
config_dir = fixtures.TempDir()
self.useFixture(config_dir)
config_path = os.path.join(config_dir.path, 'clouds.yaml')
config.OpenStackConfig.set_one_cloud(config_path, '_test_cloud_')
self.assertTrue(os.path.isfile(config_path))
with open(config_path) as fh:
self.assertEqual({'clouds': {'_test_cloud_': {}}},
yaml.safe_load(fh))
def test_set_one_cloud_updates_cloud(self):
new_config = {
'cloud': 'new_cloud',
'auth': {
'password': 'newpass'
}
}
resulting_cloud_config = {
'auth': {
'password': 'newpass',
'username': 'testuser'
},
'cloud': 'new_cloud',
'profile': '_test_cloud_in_our_cloud',
'region_name': 'test-region'
}
resulting_config = copy.deepcopy(base.USER_CONF)
resulting_config['clouds']['_test-cloud_'] = resulting_cloud_config
config.OpenStackConfig.set_one_cloud(self.cloud_yaml, '_test-cloud_',
new_config)
with open(self.cloud_yaml) as fh:
written_config = yaml.safe_load(fh)
self.assertEqual(written_config, resulting_config)
class TestConfigArgparse(base.TestCase):
def setUp(self):
super(TestConfigArgparse, self).setUp()
self.options = argparse.Namespace(
region_name='other-test-region',
snack_type='cookie',
)
def test_get_one_cloud_argparse(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
cc = c.get_one_cloud(cloud='_test-cloud_', argparse=self.options)
self._assert_cloud_details(cc)
self.assertEqual(cc.region_name, 'other-test-region')
self.assertEqual(cc.snack_type, 'cookie')
def test_get_one_cloud_just_argparse(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
cc = c.get_one_cloud(cloud='', argparse=self.options)
self.assertIsNone(cc.cloud)
self.assertNotIn('username', cc.auth)
self.assertEqual(cc.region_name, 'other-test-region')
self.assertEqual(cc.snack_type, 'cookie')
def test_get_one_cloud_no_argparse(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
cc = c.get_one_cloud(cloud='_test-cloud_', argparse=None)
self._assert_cloud_details(cc)
self.assertEqual(cc.region_name, 'test-region')
self.assertIsNone(cc.snack_type)
def test_get_one_cloud_no_argparse_regions(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
cc = c.get_one_cloud(cloud='_test_cloud_regions', argparse=None)
self._assert_cloud_details(cc)
self.assertEqual(cc.region_name, 'region1')
self.assertIsNone(cc.snack_type)
def test_get_one_cloud_no_argparse_region2(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
cc = c.get_one_cloud(
cloud='_test_cloud_regions', region_name='region2', argparse=None)
self._assert_cloud_details(cc)
self.assertEqual(cc.region_name, 'region2')
self.assertIsNone(cc.snack_type)
def test_fix_env_args(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
env_args = {'os-compute-api-version': 1}
fixed_args = c._fix_args(env_args)
self.assertDictEqual({'compute_api_version': 1}, fixed_args)
class TestConfigDefault(base.TestCase):
def setUp(self):
super(TestConfigDefault, self).setUp()
# Reset defaults after each test so that other tests are
# not affected by any changes.
self.addCleanup(self._reset_defaults)
def _reset_defaults(self):
defaults._defaults = None
def test_set_no_default(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
cc = c.get_one_cloud(cloud='_test-cloud_', argparse=None)
self._assert_cloud_details(cc)
self.assertEqual('password', cc.auth_type)
def test_set_default_before_init(self):
config.set_default('auth_type', 'token')
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
cc = c.get_one_cloud(cloud='_test-cloud_', argparse=None)
self.assertEqual('token', cc.auth_type)
class TestBackwardsCompatibility(base.TestCase):
def test_set_no_default(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
cloud = {
'identity_endpoint_type': 'admin',
'compute_endpoint_type': 'private',
'endpoint_type': 'public',
'auth_type': 'v3password',
}
result = c._fix_backwards_interface(cloud)
expected = {
'identity_interface': 'admin',
'compute_interface': 'private',
'interface': 'public',
'auth_type': 'v3password',
}
self.assertEqual(expected, result)
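# Illustrative sketch (values are hypothetical, mirroring the fixtures these
# tests rely on): the clouds.yaml consumed by OpenStackConfig looks roughly like
#
#   clouds:
#     _test-cloud_:
#       profile: _test_cloud_in_our_cloud
#       auth:
#         username: testuser
#         password: testpass
#       region_name: test-region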
|
|
###############################################################################
# The MIT License
# Copyright (c) 2012 ObjectLabs Corporation
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__author__ = 'abdul'
import string
import types
import sys
from argparse import ArgumentParser
from argparse import ArgumentError
from argparse import HelpFormatter
###############################################################################
# Constants
###############################################################################
OPTIONAL_ARG_TYPE = "optional"
POSITIONAL_ARG_TYPE = "positional"
###############################################################################
# API
###############################################################################
def build_parser(parser_def):
parser = DargeParser(parser_def)
setup_parser(parser, parser_def)
# create subparsers if specified
children = parser.get_child_definitions()
if children is not None and len(children) > 0:
subparsers = parser.add_subparsers()
for child_def in children:
childname = child_def["prog"]
child_parser = subparsers.add_parser(
childname,
definition_document=child_def,
prog = childname,
parent_dargeparser=parser)
setup_parser(child_parser, child_def)
return parser
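# Illustrative sketch (hypothetical program and function names): a minimal
# definition document accepted by build_parser(). The key names mirror the
# get_document_property() lookups used throughout this module.
#
#   EXAMPLE_PARSER_DEF = {
#       "prog": "mytool",
#       "description": "Example command line tool",
#       "children": [
#           {"prog": "status",
#            "shortDescription": "show current status",
#            "description": "Show current status",
#            "function": "mypackage.commands.status",
#            "args": [
#                {"name": "verbose",
#                 "type": "optional",
#                 "cmd_arg": "--verbose",
#                 "nargs": 0,
#                 "help": "be more verbose"}
#            ]}
#       ]
#   }
#
#   parser = build_parser(EXAMPLE_PARSER_DEF)
#   options = parser.parse_args(["status", "--verbose"])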
###############################################################################
def setup_parser(parser, parser_def):
parser.prog = get_document_property(parser_def, "prog", parser.prog)
parser.description= get_document_property(parser_def, "description")
parser.usage = get_document_property(parser_def, "usage")
#setup args
arguments = get_document_property(parser_def, "args")
if arguments is not None and len(arguments) > 0:
_setup_parser_args(parser, arguments)
# setup function to call
func = get_document_property(parser_def, "function")
if func is not None:
# check of func is a fully qualified function name and eval it
if type(func) is types.StringType:
func = resolve_function(func)
if callable(func):
parser.set_defaults(func=func)
else:
raise DargeparseException("%s is not callable" % func)
###############################################################################
def _setup_parser_args(parser, arguments):
for arg_def in arguments:
argname = get_arg_name(arg_def)
cmd_arg = get_cmd_arg(arg_def)
arg_kwargs = make_arg_kwargs(arg_def)
parser.add_argument(*listify(cmd_arg), **arg_kwargs)
###############################################################################
def make_arg_kwargs(arg_def):
argname = get_arg_name(arg_def)
kwargs = {"help": get_document_property(arg_def, "help", "")}
display_name = get_document_property(arg_def, "displayName")
if display_name is not None:
kwargs["metavar"] = display_name
cmd_arg = get_cmd_arg(arg_def)
if (cmd_arg is not None and
argname not in listify(cmd_arg)):
kwargs["dest"] = argname
nargs = get_document_property(arg_def, "nargs")
action = get_document_property(arg_def, "action")
value_type = get_document_property(arg_def, "valueType")
required = get_document_property(arg_def, "required")
version = get_document_property(arg_def, "version")
if nargs is not None:
if nargs == 0:
action = "store_true" if action is None else action
elif nargs > 1:
kwargs["nargs"] = nargs
if action is not None:
kwargs["action"] = action
if value_type is not None:
kwargs["type"] = value_type
if required is not None:
kwargs["required"] = required
if version is not None:
kwargs["version"] = version
default = get_document_property(arg_def, "default")
kwargs["default"] = default
return kwargs
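# Illustrative trace (hypothetical arg definition): an arg_def of
#   {"name": "count", "type": "optional", "cmd_arg": "-n",
#    "displayName": "COUNT", "valueType": int, "default": 1}
# yields kwargs of
#   {"help": "", "metavar": "COUNT", "dest": "count", "type": int, "default": 1}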
###############################################################################
######################## ####################################
######################## Classes ####################################
######################## ####################################
###############################################################################
class DargeHelpFormatter(HelpFormatter):
###########################################################################
def add_usage(self, usage, actions, groups, prefix=None):
HelpFormatter.add_usage(self, usage, actions, groups, prefix="Usage: ")
###############################################################################
class DargeParser(ArgumentParser):
###########################################################################
# Constructor
###########################################################################
def __init__(self,
definition_document=None,
prog=None,
usage=None,
description=None,
epilog=None,
version=None,
parents=[],
formatter_class=DargeHelpFormatter,
prefix_chars='-',
fromfile_prefix_chars=None,
argument_default=None,
conflict_handler='error',
add_help=True,
parent_dargeparser=None):
ArgumentParser.__init__(
self,
prog=prog,
usage=usage,
description=description,
epilog=epilog,
version=version,
parents=parents,
formatter_class=formatter_class,
prefix_chars=prefix_chars,
fromfile_prefix_chars=fromfile_prefix_chars,
argument_default=argument_default,
conflict_handler=conflict_handler,
add_help=add_help)
if definition_document is None:
raise Exception("definition_document cannot be None")
self._definition_document = definition_document
self.parent_dargeparser = parent_dargeparser
self.current_parsing_child = None
###########################################################################
def _check_value(self, action, command_name):
# converted value must be one of the choices (if specified)
if action.choices is not None and command_name not in action.choices:
raise ArgumentError(action, "Unknown command: '%s'\n"
"Enter '%s --help' for a list"
" of commands." %
(command_name, self.prog))
###########################################################################
def print_usage(self, file=None):
print self.get_usage()
###########################################################################
def print_help(self, file=None):
print self.get_usage() + '\n'
print self.description + '\n'
print self.get_positionals_help()
print self.get_options_help()
print self._make_epilog()
###########################################################################
def parse_args(self, raw_args=None, namespace=None):
args, argv = self.parse_known_args(raw_args, namespace)
if argv:
argv_str = ' '.join(argv)
full_prog = self.get_errored_parser().get_full_prog()
details_msg = "see '%s --help' for detailed options" % full_prog
msg = ("unrecognized arguments: %s" % argv_str +
"\n" + self.get_errored_parser().get_usage() +
"\n" + details_msg)
self.error(msg)
# add additional information
args.raw_args = raw_args
def is_arg_specified(arg_name):
return arg_name in raw_args
args.is_arg_specified = is_arg_specified
return args
###########################################################################
def parse_known_args(self, args=None, namespace=None):
if self.parent_dargeparser is not None:
self.parent_dargeparser.current_parsing_child = self
return ArgumentParser.parse_known_args(self,args,namespace)
###########################################################################
def error(self, message):
if message == "too few arguments":
self.print_help()
self.exit(2)
self.exit(2, message)
###########################################################################
def get_usage(self):
return self.usage if self.usage else self._make_usage()
###########################################################################
def get_full_prog(self):
if self.parent_dargeparser is not None:
return "%s %s" % (self.parent_dargeparser.prog , self.prog)
else:
return self.prog
###########################################################################
def _make_usage(self):
optionals = "[<options>] " if self.has_optional_args() else ""
positionals = " ".join(self.get_positional_arg_display_names())
return "Usage: %s %s%s" % (self.prog,
optionals,
positionals)
###########################################################################
def _make_epilog(self):
return "%s" % self._make_children_epilog()
###########################################################################
def _make_children_epilog(self):
if not self.has_child_definitions():
return ""
## inlined function
def _make_child_epilog(child_def):
child_prog = child_def["prog"]
return "%s - %s" % (string.ljust(child_prog, 25),
child_def['shortDescription'])
epilog = "Commands:"
for child_group in self._get_child_groups():
group_children = self.get_child_definitions_by_group(
child_group['name'])
# filter out hidden children
group_children = filter(lambda child:
not is_hidden_definition(child),
group_children)
command_list = map(_make_child_epilog ,group_children)
command_list_string = "\n ".join(command_list)
group_display = ""
if child_group['display']:
group_display = "%s:" % child_group['display']
epilog += "\n %s\n %s\n" % (group_display, command_list_string)
epilog_suffix = "See '%s <command> --help' for more help on" \
" a specific command." % (self.prog)
epilog += "\n%s\n" % epilog_suffix
return epilog
###########################################################################
def get_positionals_help(self):
# if this is the root parser then positionals help == children help
# which is done in _make_children_epilog
if self.parent_dargeparser is None:
return ""
formatter = self._get_formatter()
positional_actions = self._get_positional_actions()
if positional_actions:
formatter._indent()
for action in positional_actions:
formatter.add_argument(action)
return "Arguments:\n%s" % formatter.format_help()
else:
return ""
###########################################################################
def get_options_help(self):
formatter = self._get_formatter()
optional_actions = self._get_optional_actions()
if optional_actions:
formatter._indent()
for action in optional_actions:
# skip hidden options
arg_def = self.get_arg_definition(action.dest)
if arg_def and arg_def.get("hidden"):
continue
formatter.add_argument(action)
return "Options:\n%s" % formatter.format_help()
else:
return ""
###########################################################################
def _get_child_groups(self):
child_groups = get_document_property(self._definition_document,
"child_groups")
if child_groups is None or len(child_groups) < 1:
child_groups = [DEFAULT_CHILD_GROUP]
return child_groups
###########################################################################
def get_child_definitions_by_group(self, group_name):
if group_name == DEFAULT_CHILD_GROUP['name']:
return self.get_child_definitions()
else:
return filter(
lambda child:
get_document_property(child, "group") == group_name,
self.get_child_definitions())
###########################################################################
def get_child_definitions(self):
return get_document_property(
self._definition_document,
"children",
[])
###########################################################################
def has_child_definitions(self):
children = self.get_child_definitions()
return children is not None and len(children) > 0
###########################################################################
def get_arg_definitions(self):
return get_document_property(
self._definition_document,
"args",
[])
###########################################################################
def get_arg_definition(self, arg_name):
d = filter(
lambda arg_def: get_arg_name(arg_def) == arg_name,
self.get_arg_definitions())
return d[0] if d else None
###########################################################################
def get_optional_args(self):
return filter(
lambda arg_def: get_arg_type(arg_def) == OPTIONAL_ARG_TYPE,
self.get_arg_definitions())
###########################################################################
def has_optional_args(self):
return len(self.get_optional_args()) > 0
###########################################################################
def get_positional_args(self):
return filter(
lambda arg_def: get_arg_type(arg_def) == POSITIONAL_ARG_TYPE,
self.get_arg_definitions())
###########################################################################
def get_positional_arg_names(self):
return map(
lambda arg_def: get_arg_name(arg_def), self.get_positional_args())
###########################################################################
def get_positional_arg_display_names(self):
return map(
lambda arg_def: get_arg_display_name(arg_def),
self.get_positional_args())
###########################################################################
def get_parent_dargeparser(self):
return self.parent_dargeparser
###########################################################################
def get_errored_parser(self):
if self.current_parsing_child:
return self.current_parsing_child
else:
return self
###########################################################################
DEFAULT_CHILD_GROUP = {
"name" :"allCommands",
"display": ""
}
###############################################################################
def is_hidden_definition(definition):
return "hidden" in definition and definition["hidden"]
###############################################################################
# Arg Definition Functions
###############################################################################
def is_optional_arg(arg_def):
return get_arg_type(arg_def) == OPTIONAL_ARG_TYPE
###############################################################################
def get_arg_name(arg_def):
return get_document_property(arg_def, "name")
###############################################################################
def get_arg_type(arg_def):
return get_document_property(arg_def, "type")
###############################################################################
def get_cmd_arg(arg_def):
cmd_arg = get_document_property(arg_def, "cmd_arg")
if cmd_arg is None:
cmd_arg = get_arg_name(arg_def)
return cmd_arg
###############################################################################
def get_arg_display_name(arg_def):
return get_document_property(arg_def, "displayName", get_arg_name(arg_def))
###############################################################################
# Dargeparse Exception class
###############################################################################
class DargeparseException(Exception):
def __init__(self, message, cause=None):
self.message = message
self.cause = cause
def __str__(self):
return self.message
###############################################################################
# Objects Utility Functions
###############################################################################
def listify(obj):
if isinstance(obj, list):
return obj
return [obj]
###############################################################################
def get_document_property(document, name, default=None):
if name in document:
return document[name]
else:
return default
###############################################################################
def resolve_function(full_func_name):
names = full_func_name.split(".")
module_name = ".".join(names[:-1])
module_obj = __import__(module_name)
result = module_obj
for name in names[1:]:
result = getattr(result, name)
return result
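# Illustrative sketch (not part of the original module): resolve_function
# imports the root module and then walks the remaining dotted names with
# getattr, so a fully qualified callable can be looked up from a string.
# Assuming the standard library is importable, something like this holds:
#
#   join = resolve_function("os.path.join")
#   join("a", "b")   # -> 'a/b' on POSIX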
|
|
"""Support for Android IP Webcam."""
import asyncio
from datetime import timedelta
import logging
from pydroid_ipcam import PyDroidIPCam
import voluptuous as vol
from homeassistant.components.mjpeg.camera import CONF_MJPEG_URL, CONF_STILL_IMAGE_URL
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PLATFORM,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_SENSORS,
CONF_SWITCHES,
CONF_TIMEOUT,
CONF_USERNAME,
)
from homeassistant.core import callback
from homeassistant.helpers import discovery
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util.dt import utcnow
_LOGGER = logging.getLogger(__name__)
ATTR_AUD_CONNS = "Audio Connections"
ATTR_HOST = "host"
ATTR_VID_CONNS = "Video Connections"
CONF_MOTION_SENSOR = "motion_sensor"
DATA_IP_WEBCAM = "android_ip_webcam"
DEFAULT_NAME = "IP Webcam"
DEFAULT_PORT = 8080
DEFAULT_TIMEOUT = 10
DOMAIN = "android_ip_webcam"
SCAN_INTERVAL = timedelta(seconds=10)
SIGNAL_UPDATE_DATA = "android_ip_webcam_update"
KEY_MAP = {
"audio_connections": "Audio Connections",
"adet_limit": "Audio Trigger Limit",
"antibanding": "Anti-banding",
"audio_only": "Audio Only",
"battery_level": "Battery Level",
"battery_temp": "Battery Temperature",
"battery_voltage": "Battery Voltage",
"coloreffect": "Color Effect",
"exposure": "Exposure Level",
"exposure_lock": "Exposure Lock",
"ffc": "Front-facing Camera",
"flashmode": "Flash Mode",
"focus": "Focus",
"focus_homing": "Focus Homing",
"focus_region": "Focus Region",
"focusmode": "Focus Mode",
"gps_active": "GPS Active",
"idle": "Idle",
"ip_address": "IPv4 Address",
"ipv6_address": "IPv6 Address",
"ivideon_streaming": "Ivideon Streaming",
"light": "Light Level",
"mirror_flip": "Mirror Flip",
"motion": "Motion",
"motion_active": "Motion Active",
"motion_detect": "Motion Detection",
"motion_event": "Motion Event",
"motion_limit": "Motion Limit",
"night_vision": "Night Vision",
"night_vision_average": "Night Vision Average",
"night_vision_gain": "Night Vision Gain",
"orientation": "Orientation",
"overlay": "Overlay",
"photo_size": "Photo Size",
"pressure": "Pressure",
"proximity": "Proximity",
"quality": "Quality",
"scenemode": "Scene Mode",
"sound": "Sound",
"sound_event": "Sound Event",
"sound_timeout": "Sound Timeout",
"torch": "Torch",
"video_connections": "Video Connections",
"video_chunk_len": "Video Chunk Length",
"video_recording": "Video Recording",
"video_size": "Video Size",
"whitebalance": "White Balance",
"whitebalance_lock": "White Balance Lock",
"zoom": "Zoom",
}
ICON_MAP = {
"audio_connections": "mdi:speaker",
"battery_level": "mdi:battery",
"battery_temp": "mdi:thermometer",
"battery_voltage": "mdi:battery-charging-100",
"exposure_lock": "mdi:camera",
"ffc": "mdi:camera-front-variant",
"focus": "mdi:image-filter-center-focus",
"gps_active": "mdi:crosshairs-gps",
"light": "mdi:flashlight",
"motion": "mdi:run",
"night_vision": "mdi:weather-night",
"overlay": "mdi:monitor",
"pressure": "mdi:gauge",
"proximity": "mdi:map-marker-radius",
"quality": "mdi:quality-high",
"sound": "mdi:speaker",
"sound_event": "mdi:speaker",
"sound_timeout": "mdi:speaker",
"torch": "mdi:white-balance-sunny",
"video_chunk_len": "mdi:video",
"video_connections": "mdi:eye",
"video_recording": "mdi:record-rec",
"whitebalance_lock": "mdi:white-balance-auto",
}
SWITCHES = [
"exposure_lock",
"ffc",
"focus",
"gps_active",
"motion_detect",
"night_vision",
"overlay",
"torch",
"whitebalance_lock",
"video_recording",
]
SENSORS = [
"audio_connections",
"battery_level",
"battery_temp",
"battery_voltage",
"light",
"motion",
"pressure",
"proximity",
"sound",
"video_connections",
]
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(
CONF_TIMEOUT, default=DEFAULT_TIMEOUT
): cv.positive_int,
vol.Optional(
CONF_SCAN_INTERVAL, default=SCAN_INTERVAL
): cv.time_period,
vol.Inclusive(CONF_USERNAME, "authentication"): cv.string,
vol.Inclusive(CONF_PASSWORD, "authentication"): cv.string,
vol.Optional(CONF_SWITCHES): vol.All(
cv.ensure_list, [vol.In(SWITCHES)]
),
vol.Optional(CONF_SENSORS): vol.All(
cv.ensure_list, [vol.In(SENSORS)]
),
vol.Optional(CONF_MOTION_SENSOR): cv.boolean,
}
)
],
)
},
extra=vol.ALLOW_EXTRA,
)
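# Illustrative sketch (not from the original source): a configuration dict
# that CONFIG_SCHEMA above should accept; host and entity lists are
# placeholders, and omitted keys fall back to their defaults.
#
#   CONFIG_SCHEMA({
#       'android_ip_webcam': [{
#           'host': '192.168.1.10',
#           'sensors': ['battery_level', 'light'],
#           'switches': ['torch'],
#           'motion_sensor': True,
#       }]
#   })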
async def async_setup(hass, config):
"""Set up the IP Webcam component."""
webcams = hass.data[DATA_IP_WEBCAM] = {}
websession = async_get_clientsession(hass)
async def async_setup_ipcamera(cam_config):
"""Set up an IP camera."""
host = cam_config[CONF_HOST]
username = cam_config.get(CONF_USERNAME)
password = cam_config.get(CONF_PASSWORD)
name = cam_config[CONF_NAME]
interval = cam_config[CONF_SCAN_INTERVAL]
switches = cam_config.get(CONF_SWITCHES)
sensors = cam_config.get(CONF_SENSORS)
motion = cam_config.get(CONF_MOTION_SENSOR)
# Init ip webcam
cam = PyDroidIPCam(
hass.loop,
websession,
host,
cam_config[CONF_PORT],
username=username,
password=password,
timeout=cam_config[CONF_TIMEOUT],
)
if switches is None:
switches = [
setting for setting in cam.enabled_settings if setting in SWITCHES
]
if sensors is None:
sensors = [sensor for sensor in cam.enabled_sensors if sensor in SENSORS]
sensors.extend(["audio_connections", "video_connections"])
if motion is None:
motion = "motion_active" in cam.enabled_sensors
async def async_update_data(now):
"""Update data from IP camera in SCAN_INTERVAL."""
await cam.update()
async_dispatcher_send(hass, SIGNAL_UPDATE_DATA, host)
async_track_point_in_utc_time(hass, async_update_data, utcnow() + interval)
await async_update_data(None)
# Load platforms
webcams[host] = cam
mjpeg_camera = {
CONF_PLATFORM: "mjpeg",
CONF_MJPEG_URL: cam.mjpeg_url,
CONF_STILL_IMAGE_URL: cam.image_url,
CONF_NAME: name,
}
if username and password:
mjpeg_camera.update({CONF_USERNAME: username, CONF_PASSWORD: password})
hass.async_create_task(
discovery.async_load_platform(hass, "camera", "mjpeg", mjpeg_camera, config)
)
if sensors:
hass.async_create_task(
discovery.async_load_platform(
hass,
"sensor",
DOMAIN,
{CONF_NAME: name, CONF_HOST: host, CONF_SENSORS: sensors},
config,
)
)
if switches:
hass.async_create_task(
discovery.async_load_platform(
hass,
"switch",
DOMAIN,
{CONF_NAME: name, CONF_HOST: host, CONF_SWITCHES: switches},
config,
)
)
if motion:
hass.async_create_task(
discovery.async_load_platform(
hass,
"binary_sensor",
DOMAIN,
{CONF_HOST: host, CONF_NAME: name},
config,
)
)
tasks = [async_setup_ipcamera(conf) for conf in config[DOMAIN]]
if tasks:
await asyncio.wait(tasks)
return True
class AndroidIPCamEntity(Entity):
"""The Android device running IP Webcam."""
def __init__(self, host, ipcam):
"""Initialize the data object."""
self._host = host
self._ipcam = ipcam
async def async_added_to_hass(self):
"""Register update dispatcher."""
@callback
def async_ipcam_update(host):
"""Update callback."""
if self._host != host:
return
self.async_schedule_update_ha_state(True)
self.async_on_remove(
async_dispatcher_connect(self.hass, SIGNAL_UPDATE_DATA, async_ipcam_update)
)
@property
def should_poll(self):
"""Return True if entity has to be polled for state."""
return False
@property
def available(self):
"""Return True if entity is available."""
return self._ipcam.available
@property
def device_state_attributes(self):
"""Return the state attributes."""
state_attr = {ATTR_HOST: self._host}
if self._ipcam.status_data is None:
return state_attr
state_attr[ATTR_VID_CONNS] = self._ipcam.status_data.get("video_connections")
state_attr[ATTR_AUD_CONNS] = self._ipcam.status_data.get("audio_connections")
return state_attr
|
|
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from collections import namedtuple, OrderedDict
import six
import ibis.common as com
import ibis.util as util
class Schema(object):
"""
Holds table schema information
"""
def __init__(self, names, types):
if not isinstance(names, list):
names = list(names)
self.names = names
self.types = [validate_type(x) for x in types]
self._name_locs = dict((v, i) for i, v in enumerate(self.names))
if len(self._name_locs) < len(self.names):
raise com.IntegrityError('Duplicate column names')
def __repr__(self):
space = 2 + max(map(len, self.names))
return "ibis.Schema {{{0}\n}}".format(
util.indent(
''.join(
'\n{0}{1}'.format(name.ljust(space), str(tipo))
for name, tipo in zip(self.names, self.types)
),
2
)
)
def __len__(self):
return len(self.names)
def __iter__(self):
return iter(self.names)
def __contains__(self, name):
return name in self._name_locs
def __getitem__(self, name):
return self.types[self._name_locs[name]]
def delete(self, names_to_delete):
for name in names_to_delete:
if name not in self:
raise KeyError(name)
new_names, new_types = [], []
for name, type_ in zip(self.names, self.types):
if name in names_to_delete:
continue
new_names.append(name)
new_types.append(type_)
return Schema(new_names, new_types)
@classmethod
def from_tuples(cls, values):
if not isinstance(values, (list, tuple)):
values = list(values)
if len(values):
names, types = zip(*values)
else:
names, types = [], []
return Schema(names, types)
@classmethod
def from_dict(cls, values):
names = list(values.keys())
types = values.values()
return Schema(names, types)
def equals(self, other, cache=None):
return self.names == other.names and self.types == other.types
def __eq__(self, other):
return self.equals(other)
def get_type(self, name):
return self.types[self._name_locs[name]]
def append(self, schema):
names = self.names + schema.names
types = self.types + schema.types
return Schema(names, types)
def items(self):
return zip(self.names, self.types)
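# Example usage (hedged sketch, not part of the ibis source): build a Schema
# from (name, type) tuples; type strings are resolved through validate_type,
# which is defined later in this module.
#
#   sch = Schema.from_tuples([('key', 'string'), ('value', 'double')])
#   len(sch)        # -> 2
#   'key' in sch    # -> True
#   sch['value']    # -> double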
class HasSchema(object):
"""
Base class representing a structured dataset with a well-defined
schema.
Base implementation is for tables that do not reference a particular
concrete dataset or database table.
"""
def __init__(self, schema, name=None):
assert isinstance(schema, Schema)
self._schema = schema
self._name = name
def __repr__(self):
return self._repr()
def _repr(self):
return "%s(%s)" % (type(self).__name__, repr(self.schema))
@property
def schema(self):
return self._schema
def get_schema(self):
return self._schema
def has_schema(self):
return True
@property
def name(self):
return self._name
def equals(self, other, cache=None):
if type(self) != type(other):
return False
return self.schema.equals(other.schema, cache=cache)
def root_tables(self):
return [self]
class DataType(object):
def __init__(self, nullable=True):
self.nullable = nullable
def __call__(self, nullable=True):
return self._factory(nullable=nullable)
def _factory(self, nullable=True):
return type(self)(nullable=nullable)
def __eq__(self, other):
return self.equals(other)
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(type(self))
def __repr__(self):
name = self.name.lower()
if not self.nullable:
name = '{0}[non-nullable]'.format(name)
return name
@property
def name(self):
return type(self).__name__
def equals(self, other, cache=None):
if isinstance(other, six.string_types):
other = validate_type(other)
return (isinstance(other, type(self)) and
self.nullable == other.nullable)
def can_implicit_cast(self, other):
return self.equals(other)
def scalar_type(self):
import ibis.expr.types as ir
return getattr(ir, '{0}Scalar'.format(type(self).__name__))
def array_type(self):
import ibis.expr.types as ir
return getattr(ir, '{0}Column'.format(type(self).__name__))
class Any(DataType):
pass
class Primitive(DataType):
pass
class Null(DataType):
pass
class Variadic(DataType):
pass
class Boolean(Primitive):
pass
Bounds = namedtuple('Bounds', ('upper', 'lower'))
class Integer(Primitive):
@property
def bounds(self):
exp = self._nbytes * 8 - 1
lower = -1 << exp
return Bounds(lower=lower, upper=~lower)
def can_implicit_cast(self, other):
return (
isinstance(other, Integer) and
(type(self) is Integer or other._nbytes <= self._nbytes)
)
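# Worked example (sketch): for Int8, _nbytes is 1, so exp = 7,
# lower = -1 << 7 = -128 and upper = ~lower = 127, i.e.
#
#   int8.bounds   # -> Bounds(upper=127, lower=-128)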
class String(Variadic):
pass
class Date(Primitive):
pass
class Timestamp(Primitive):
pass
class SignedInteger(Integer):
pass
class Floating(Primitive):
def can_implicit_cast(self, other):
if isinstance(other, Integer):
return True
elif isinstance(other, Floating):
# return other._nbytes <= self._nbytes
return True
else:
return False
class Int8(Integer):
_nbytes = 1
class Int16(Integer):
_nbytes = 2
class Int32(Integer):
_nbytes = 4
class Int64(Integer):
_nbytes = 8
class Float(Floating):
_nbytes = 4
class Double(Floating):
_nbytes = 8
def parametric(cls):
type_name = cls.__name__
array_type_name = '{0}Column'.format(type_name)
scalar_type_name = '{0}Scalar'.format(type_name)
def array_type(self):
def constructor(op, name=None):
import ibis.expr.types as ir
return getattr(ir, array_type_name)(op, self, name=name)
return constructor
def scalar_type(self):
def constructor(op, name=None):
import ibis.expr.types as ir
return getattr(ir, scalar_type_name)(op, self, name=name)
return constructor
cls.array_type = array_type
cls.scalar_type = scalar_type
return cls
@parametric
class Decimal(DataType):
# Decimal types are parametric, we store the parameters in this object
def __init__(self, precision, scale, nullable=True):
super(Decimal, self).__init__(nullable=nullable)
self.precision = precision
self.scale = scale
def __repr__(self):
return '{0}(precision={1:d}, scale={2:d})'.format(
self.name,
self.precision,
self.scale,
)
def __str__(self):
return '{0}({1:d}, {2:d})'.format(
self.name.lower(),
self.precision,
self.scale,
)
def __hash__(self):
return hash((self.precision, self.scale))
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
return (
isinstance(other, Decimal) and
self.precision == other.precision and
self.scale == other.scale
)
@classmethod
def can_implicit_cast(cls, other):
return isinstance(other, (Floating, Decimal))
@parametric
class Category(DataType):
def __init__(self, cardinality=None, nullable=True):
super(Category, self).__init__(nullable=nullable)
self.cardinality = cardinality
def __repr__(self):
if self.cardinality is not None:
cardinality = self.cardinality
else:
cardinality = 'unknown'
return 'category(K={0})'.format(cardinality)
def __hash__(self):
return hash(self.cardinality)
def __eq__(self, other):
if not isinstance(other, Category):
return False
return self.cardinality == other.cardinality
def to_integer_type(self):
cardinality = self.cardinality
if cardinality is None:
return int64
elif cardinality < int8.bounds.upper:
return int8
elif cardinality < int16.bounds.upper:
return int16
elif cardinality < int32.bounds.upper:
return int32
else:
return int64
@parametric
class Struct(DataType):
def __init__(self, names, types, nullable=True):
super(Struct, self).__init__(nullable=nullable)
self.names = names
self.types = types
def __repr__(self):
return '{0}({1})'.format(
self.name,
list(zip(self.names, self.types))
)
def __str__(self):
return '{0}<{1}>'.format(
self.name.lower(),
', '.join(
'{0}: {1}'.format(n, t) for n, t in zip(self.names, self.types)
)
)
def __eq__(self, other):
return (isinstance(other, type(self)) and
self.names == other.names and
self.types == other.types)
@classmethod
def from_tuples(cls, pairs):
return Struct(*map(list, zip(*pairs)))
@parametric
class Array(Variadic):
def __init__(self, value_type, nullable=True):
super(Array, self).__init__(nullable=nullable)
self.value_type = value_type
def __repr__(self):
return '{0}({1})'.format(self.name, repr(self.value_type))
def __str__(self):
return '{0}<{1}>'.format(self.name.lower(), self.value_type)
def __eq__(self, other):
return (
isinstance(other, type(self)) and
self.value_type == other.value_type
)
@parametric
class Enum(DataType):
def __init__(self, rep_type, value_type, nullable=True):
super(Enum, self).__init__(nullable=nullable)
self.rep_type = rep_type
self.value_type = value_type
@parametric
class Map(DataType):
def __init__(self, key_type, value_type, nullable=True):
super(Map, self).__init__(nullable=nullable)
self.key_type = key_type
self.value_type = value_type
def __repr__(self):
return '{0}({1}, {2})'.format(
self.name,
repr(self.key_type),
repr(self.value_type),
)
def __str__(self):
return '{0}<{1}, {2}>'.format(
self.name.lower(),
self.key_type,
self.value_type,
)
def __eq__(self, other):
return (
isinstance(other, type(self)) and
self.key_type == other.key_type and
self.value_type == other.value_type
)
# ---------------------------------------------------------------------
any = Any()
null = Null()
boolean = Boolean()
int_ = Integer()
int8 = Int8()
int16 = Int16()
int32 = Int32()
int64 = Int64()
float = Float()
double = Double()
string = String()
date = Date()
timestamp = Timestamp()
_primitive_types = {
'any': any,
'null': null,
'boolean': boolean,
'int8': int8,
'int16': int16,
'int32': int32,
'int64': int64,
'float': float,
'double': double,
'string': string,
'date': date,
'timestamp': timestamp
}
class Tokens(object):
"""Class to hold tokens for lexing
"""
__slots__ = ()
ANY = 0
NULL = 1
PRIMITIVE = 2
DECIMAL = 3
VARCHAR = 4
CHAR = 5
ARRAY = 6
MAP = 7
STRUCT = 8
INTEGER = 9
FIELD = 10
COMMA = 11
COLON = 12
LPAREN = 13
RPAREN = 14
LBRACKET = 15
RBRACKET = 16
@staticmethod
def name(value):
return _token_names[value]
_token_names = dict(
(getattr(Tokens, n), n)
for n in dir(Tokens) if n.isalpha() and n.isupper()
)
Token = namedtuple('Token', ('type', 'value'))
_TYPE_RULES = OrderedDict(
[
# any, null
('(?P<ANY>any)', lambda token: Token(Tokens.ANY, any)),
('(?P<NULL>null)', lambda token: Token(Tokens.NULL, null)),
] + [
# primitive types
(
'(?P<{}>{})'.format(token.upper(), token),
lambda token, value=value: Token(Tokens.PRIMITIVE, value)
) for token, value in _primitive_types.items()
if token != 'any' and token != 'null'
] + [
# decimal + complex types
(
'(?P<{}>{})'.format(token.upper(), token),
lambda token, toktype=toktype: Token(toktype, token)
) for token, toktype in zip(
('decimal', 'varchar', 'char', 'array', 'map', 'struct'),
(
Tokens.DECIMAL,
Tokens.VARCHAR,
Tokens.CHAR,
Tokens.ARRAY,
Tokens.MAP,
Tokens.STRUCT
),
)
] + [
# numbers, for decimal spec
(r'(?P<INTEGER>\d+)', lambda token: Token(Tokens.INTEGER, int(token))),
# struct fields
(
r'(?P<FIELD>[a-zA-Z_][a-zA-Z_0-9]*)',
lambda token: Token(Tokens.FIELD, token)
),
('(?P<COMMA>,)', lambda token: Token(Tokens.COMMA, token)),
('(?P<COLON>:)', lambda token: Token(Tokens.COLON, token)),
(r'(?P<LPAREN>\()', lambda token: Token(Tokens.LPAREN, token)),
(r'(?P<RPAREN>\))', lambda token: Token(Tokens.RPAREN, token)),
('(?P<LBRACKET><)', lambda token: Token(Tokens.LBRACKET, token)),
('(?P<RBRACKET>>)', lambda token: Token(Tokens.RBRACKET, token)),
(r'(?P<WHITESPACE>\s+)', None),
]
)
_TYPE_KEYS = tuple(_TYPE_RULES.keys())
_TYPE_PATTERN = re.compile('|'.join(_TYPE_KEYS), flags=re.IGNORECASE)
def _generate_tokens(pat, text):
"""Generate a sequence of tokens from `text` that match `pat`
Parameters
----------
pat : compiled regex
The pattern to use for tokenization
text : str
The text to tokenize
"""
rules = _TYPE_RULES
keys = _TYPE_KEYS
groupindex = pat.groupindex
for m in iter(pat.scanner(text).match, None):
func = rules[keys[groupindex[m.lastgroup] - 1]]
if func is not None:
assert callable(func), 'func must be callable'
yield func(m.group(m.lastgroup))
class TypeParser(object):
"""A type parser for complex types.
Parameters
----------
text : str
The text to parse
Notes
-----
Adapted from David Beazley's and Brian Jones's Python Cookbook
"""
def __init__(self, text):
self.text = text
self.tokens = _generate_tokens(_TYPE_PATTERN, text)
self.tok = None
self.nexttok = None
def _advance(self):
self.tok, self.nexttok = self.nexttok, next(self.tokens, None)
def _accept(self, toktype):
if self.nexttok is not None and self.nexttok.type == toktype:
self._advance()
return True
return False
def _expect(self, toktype):
if not self._accept(toktype):
raise SyntaxError('Expected {0} after {1!r} in {2!r}'.format(
Tokens.name(toktype),
self.tok.value,
self.text,
))
def parse(self):
self._advance()
# any and null types cannot be nested
if self._accept(Tokens.ANY) or self._accept(Tokens.NULL):
return self.tok.value
t = self.type()
if self.nexttok is None:
return t
else:
# additional junk was passed at the end, throw an error
additional_tokens = []
while self.nexttok is not None:
additional_tokens.append(self.nexttok.value)
self._advance()
raise SyntaxError(
'Found additional tokens {0}'.format(additional_tokens)
)
def type(self):
"""
type : primitive
| decimal
| array
| map
| struct
primitive : "any"
| "null"
| "boolean"
| "int8"
| "int16"
| "int32"
| "int64"
| "float"
| "double"
| "string"
| "timestamp"
decimal : "decimal"
| "decimal" "(" integer "," integer ")"
integer : [0-9]+
array : "array" "<" type ">"
map : "map" "<" type "," type ">"
struct : "struct" "<" field ":" type ("," field ":" type)* ">"
field : [a-zA-Z_][a-zA-Z_0-9]*
"""
if self._accept(Tokens.PRIMITIVE):
return self.tok.value
elif self._accept(Tokens.DECIMAL):
if self._accept(Tokens.LPAREN):
self._expect(Tokens.INTEGER)
precision = self.tok.value
self._expect(Tokens.COMMA)
self._expect(Tokens.INTEGER)
scale = self.tok.value
self._expect(Tokens.RPAREN)
else:
precision = 9
scale = 0
return Decimal(precision, scale)
elif self._accept(Tokens.VARCHAR) or self._accept(Tokens.CHAR):
# VARCHAR, VARCHAR(n), CHAR, and CHAR(n) all parse as STRING
if self._accept(Tokens.LPAREN):
self._expect(Tokens.INTEGER)
self._expect(Tokens.RPAREN)
return string
return string
elif self._accept(Tokens.ARRAY):
self._expect(Tokens.LBRACKET)
value_type = self.type()
self._expect(Tokens.RBRACKET)
return Array(value_type)
elif self._accept(Tokens.MAP):
self._expect(Tokens.LBRACKET)
self._expect(Tokens.PRIMITIVE)
key_type = self.tok.value
self._expect(Tokens.COMMA)
value_type = self.type()
self._expect(Tokens.RBRACKET)
return Map(key_type, value_type)
elif self._accept(Tokens.STRUCT):
self._expect(Tokens.LBRACKET)
self._expect(Tokens.FIELD)
names = [self.tok.value]
self._expect(Tokens.COLON)
types = [self.type()]
while self._accept(Tokens.COMMA):
self._expect(Tokens.FIELD)
names.append(self.tok.value)
self._expect(Tokens.COLON)
types.append(self.type())
self._expect(Tokens.RBRACKET)
return Struct(names, types)
else:
raise SyntaxError('Type cannot be parsed: {0}'.format(self.text))
def validate_type(t):
if isinstance(t, DataType):
return t
return TypeParser(t).parse()
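# Example (hedged sketch): validate_type parses type strings with TypeParser,
# so nested types round-trip through str() and the grammar shortcuts apply.
#
#   t = validate_type('map<string, array<double>>')
#   str(t)                       # -> 'map<string, array<double>>'
#   validate_type('decimal')     # -> Decimal(precision=9, scale=0)
#   validate_type('varchar(30)') is string   # VARCHAR(n) parses as string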
def array_type(t):
# compatibility
return validate_type(t).array_type()
def scalar_type(t):
# compatibility
return validate_type(t).scalar_type()
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import json
import os
from six import iteritems
from six.moves.urllib.parse import urlencode
import frappe
from frappe import _, scrub
from frappe.core.doctype.file.file import get_max_file_size, remove_file_by_url
from frappe.custom.doctype.customize_form.customize_form import docfield_properties
from frappe.desk.form.meta import get_code_files_via_hooks
from frappe.integrations.utils import get_payment_gateway_controller
from frappe.modules.utils import export_module_json, get_doc_module
from frappe.utils import cstr
from frappe.website.utils import get_comment_list
from frappe.website.website_generator import WebsiteGenerator
class WebForm(WebsiteGenerator):
website = frappe._dict(
no_cache = 1
)
def onload(self):
super(WebForm, self).onload()
if self.is_standard and not frappe.conf.developer_mode:
self.use_meta_fields()
def validate(self):
super(WebForm, self).validate()
if not self.module:
self.module = frappe.db.get_value('DocType', self.doc_type, 'module')
if (not (frappe.flags.in_install or frappe.flags.in_patch or frappe.flags.in_test or frappe.flags.in_fixtures)
and self.is_standard and not frappe.conf.developer_mode):
frappe.throw(_("You need to be in developer mode to edit a Standard Web Form"))
if not frappe.flags.in_import:
self.validate_fields()
if self.accept_payment:
self.validate_payment_amount()
def validate_fields(self):
'''Validate all fields are present'''
from frappe.model import no_value_fields
missing = []
meta = frappe.get_meta(self.doc_type)
for df in self.web_form_fields:
if df.fieldname and (df.fieldtype not in no_value_fields and not meta.has_field(df.fieldname)):
missing.append(df.fieldname)
if missing:
frappe.throw(_('Following fields are missing:') + '<br>' + '<br>'.join(missing))
def validate_payment_amount(self):
if self.amount_based_on_field and not self.amount_field:
frappe.throw(_("Please select a Amount Field."))
elif not self.amount_based_on_field and not self.amount > 0:
frappe.throw(_("Amount must be greater than 0."))
def reset_field_parent(self):
'''Convert link fields to select with names as options'''
for df in self.web_form_fields:
df.parent = self.doc_type
def use_meta_fields(self):
'''Override default properties for standard web forms'''
meta = frappe.get_meta(self.doc_type)
for df in self.web_form_fields:
meta_df = meta.get_field(df.fieldname)
if not meta_df:
continue
for prop in docfield_properties:
if df.fieldtype==meta_df.fieldtype and prop not in ("idx",
"reqd", "default", "description", "default", "options",
"hidden", "read_only", "label"):
df.set(prop, meta_df.get(prop))
# TODO translate options of Select fields like Country
# export
def on_update(self):
"""
Export this web form as a module .json file and, for standard web forms,
create boilerplate .js and .py files if they do not exist yet
"""
path = export_module_json(self, self.is_standard, self.module)
if path:
# js
if not os.path.exists(path + '.js'):
with open(path + '.js', 'w') as f:
f.write("""frappe.ready(function() {
// bind events here
})""")
# py
if not os.path.exists(path + '.py'):
with open(path + '.py', 'w') as f:
f.write("""from __future__ import unicode_literals
import frappe
def get_context(context):
# do your magic here
pass
""")
def get_context(self, context):
'''Build context to render the `web_form.html` template'''
self.set_web_form_module()
doc, delimeter = make_route_string(frappe.form_dict)
context.doc = doc
context.delimeter = delimeter
# check permissions
if frappe.session.user == "Guest" and frappe.form_dict.name:
frappe.throw(_("You need to be logged in to access this {0}.").format(self.doc_type), frappe.PermissionError)
if frappe.form_dict.name and not self.has_web_form_permission(self.doc_type, frappe.form_dict.name):
frappe.throw(_("You don't have the permissions to access this document"), frappe.PermissionError)
self.reset_field_parent()
if self.is_standard:
self.use_meta_fields()
if not frappe.session.user == "Guest":
if self.allow_edit:
if self.allow_multiple:
if not frappe.form_dict.name and not frappe.form_dict.new:
# list data is queried via JS
context.is_list = True
else:
if frappe.session.user != 'Guest' and not frappe.form_dict.name:
frappe.form_dict.name = frappe.db.get_value(self.doc_type, {"owner": frappe.session.user}, "name")
if not frappe.form_dict.name:
# only a single doc allowed and no existing doc, hence new
frappe.form_dict.new = 1
if frappe.form_dict.is_list:
context.is_list = True
# always render new form if login is not required or doesn't allow editing existing ones
if not self.login_required or not self.allow_edit:
frappe.form_dict.new = 1
self.load_document(context)
context.parents = self.get_parents(context)
if self.breadcrumbs:
context.parents = frappe.safe_eval(self.breadcrumbs, { "_": _ })
context.has_header = ((frappe.form_dict.name or frappe.form_dict.new)
and (frappe.session.user!="Guest" or not self.login_required))
if context.success_message:
context.success_message = frappe.db.escape(context.success_message.replace("\n",
"<br>")).strip("'")
self.add_custom_context_and_script(context)
if not context.max_attachment_size:
context.max_attachment_size = get_max_file_size() / 1024 / 1024
context.show_in_grid = self.show_in_grid
self.load_translations(context)
def load_translations(self, context):
translated_messages = frappe.translate.get_dict('doctype', self.doc_type)
# Sr is not added by default, had to be added manually
translated_messages['Sr'] = _('Sr')
context.translated_messages = frappe.as_json(translated_messages)
def load_document(self, context):
'''Load document `doc` and `layout` properties for template'''
if frappe.form_dict.name or frappe.form_dict.new:
context.layout = self.get_layout()
context.parents = [{"route": self.route, "label": _(self.title) }]
if frappe.form_dict.name:
context.doc = frappe.get_doc(self.doc_type, frappe.form_dict.name)
context.title = context.doc.get(context.doc.meta.get_title_field())
context.doc.add_seen()
context.reference_doctype = context.doc.doctype
context.reference_name = context.doc.name
if self.show_attachments:
context.attachments = frappe.get_all('File', filters= {"attached_to_name": context.reference_name, "attached_to_doctype": context.reference_doctype, "is_private": 0},
fields=['file_name','file_url', 'file_size'])
if self.allow_comments:
context.comment_list = get_comment_list(context.doc.doctype,
context.doc.name)
def get_payment_gateway_url(self, doc):
if self.accept_payment:
controller = get_payment_gateway_controller(self.payment_gateway)
title = "Payment for {0} {1}".format(doc.doctype, doc.name)
amount = self.amount
if self.amount_based_on_field:
amount = doc.get(self.amount_field)
payment_details = {
"amount": amount,
"title": title,
"description": title,
"reference_doctype": doc.doctype,
"reference_docname": doc.name,
"payer_email": frappe.session.user,
"payer_name": frappe.utils.get_fullname(frappe.session.user),
"order_id": doc.name,
"currency": self.currency,
"redirect_to": frappe.utils.get_url(self.success_url or self.route)
}
# Redirect the user to this url
return controller.get_payment_url(**payment_details)
def add_custom_context_and_script(self, context):
'''Update context from module if standard and append script'''
if self.web_form_module:
new_context = self.web_form_module.get_context(context)
if new_context:
context.update(new_context)
js_path = os.path.join(os.path.dirname(self.web_form_module.__file__), scrub(self.name) + '.js')
if os.path.exists(js_path):
script = frappe.render_template(open(js_path, 'r').read(), context)
for path in get_code_files_via_hooks("webform_include_js", context.doc_type):
custom_js = frappe.render_template(open(path, 'r').read(), context)
script = "\n\n".join([script, custom_js])
context.script = script
css_path = os.path.join(os.path.dirname(self.web_form_module.__file__), scrub(self.name) + '.css')
if os.path.exists(css_path):
style = open(css_path, 'r').read()
for path in get_code_files_via_hooks("webform_include_css", context.doc_type):
custom_css = open(path, 'r').read()
style = "\n\n".join([style, custom_css])
context.style = style
def get_layout(self):
layout = []
def add_page(df=None):
new_page = {'sections': []}
layout.append(new_page)
if df and df.fieldtype=='Page Break':
new_page.update(df.as_dict())
return new_page
def add_section(df=None):
new_section = {'columns': []}
if layout:
layout[-1]['sections'].append(new_section)
if df and df.fieldtype=='Section Break':
new_section.update(df.as_dict())
return new_section
def add_column(df=None):
new_col = []
if layout:
layout[-1]['sections'][-1]['columns'].append(new_col)
return new_col
page, section, column = None, None, None
for df in self.web_form_fields:
# breaks
if df.fieldtype=='Page Break':
page = add_page(df)
section, column = None, None
if df.fieldtype=='Section Break':
section = add_section(df)
column = None
if df.fieldtype=='Column Break':
column = add_column(df)
# input
if df.fieldtype not in ('Section Break', 'Column Break', 'Page Break'):
if not page:
page = add_page()
section, column = None, None
if not section:
section = add_section()
column = None
if column is None:
column = add_column()
column.append(df)
return layout
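# Layout sketch (illustrative, not from the original source): get_layout nests
# fields as pages -> sections -> columns. For a field list such as
# [Data, Column Break, Data, Section Break, Data] the result is roughly
#
#   [{'sections': [
#       {'columns': [[<data df>], [<data df>]]},
#       {'columns': [[<data df>]], ...section-break props...},
#   ]}]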
def get_parents(self, context):
parents = None
if context.is_list and not context.parents:
parents = [{"title": _("My Account"), "name": "me"}]
elif context.parents:
parents = context.parents
return parents
def set_web_form_module(self):
'''Get custom web form module if exists'''
self.web_form_module = self.get_web_form_module()
def get_web_form_module(self):
if self.is_standard:
return get_doc_module(self.module, self.doctype, self.name)
def validate_mandatory(self, doc):
'''Validate mandatory web form fields'''
missing = []
for f in self.web_form_fields:
if f.reqd and doc.get(f.fieldname) in (None, [], ''):
missing.append(f)
if missing:
frappe.throw(_('Mandatory Information missing:') + '<br><br>'
+ '<br>'.join(['{0} ({1})'.format(d.label, d.fieldtype) for d in missing]))
def allow_website_search_indexing(self):
return False
def has_web_form_permission(self, doctype, name, ptype='read'):
if frappe.session.user=="Guest":
return False
if self.apply_document_permissions:
return frappe.get_doc(doctype, name).has_permission()
# owner matches
elif frappe.db.get_value(doctype, name, "owner")==frappe.session.user:
return True
elif frappe.has_website_permission(name, ptype=ptype, doctype=doctype):
return True
elif check_webform_perm(doctype, name):
return True
else:
return False
@frappe.whitelist(allow_guest=True)
def accept(web_form, data, docname=None, for_payment=False):
'''Save the web form'''
data = frappe._dict(json.loads(data))
for_payment = frappe.parse_json(for_payment)
files = []
files_to_delete = []
web_form = frappe.get_doc("Web Form", web_form)
if data.name and not web_form.allow_edit:
frappe.throw(_("You are not allowed to update this Web Form Document"))
frappe.flags.in_web_form = True
meta = frappe.get_meta(data.doctype)
if docname:
# update
doc = frappe.get_doc(data.doctype, docname)
else:
# insert
doc = frappe.new_doc(data.doctype)
# set values
for field in web_form.web_form_fields:
fieldname = field.fieldname
df = meta.get_field(fieldname)
value = data.get(fieldname, None)
if df and df.fieldtype in ('Attach', 'Attach Image'):
if value and 'data:' in value and 'base64' in value:
files.append((fieldname, value))
if not doc.name:
doc.set(fieldname, '')
continue
elif not value and doc.get(fieldname):
files_to_delete.append(doc.get(fieldname))
doc.set(fieldname, value)
if for_payment:
web_form.validate_mandatory(doc)
doc.run_method('validate_payment')
if doc.name:
if web_form.has_web_form_permission(doc.doctype, doc.name, "write"):
doc.save(ignore_permissions=True)
else:
# only if permissions are present
doc.save()
else:
# insert
if web_form.login_required and frappe.session.user=="Guest":
frappe.throw(_("You must login to submit this form"))
ignore_mandatory = True if files else False
doc.insert(ignore_permissions = True, ignore_mandatory = ignore_mandatory)
# add files
if files:
for f in files:
fieldname, filedata = f
# remove earlier attached file (if exists)
if doc.get(fieldname):
remove_file_by_url(doc.get(fieldname), doctype=doc.doctype, name=doc.name)
# save new file
filename, dataurl = filedata.split(',', 1)
_file = frappe.get_doc({
"doctype": "File",
"file_name": filename,
"attached_to_doctype": doc.doctype,
"attached_to_name": doc.name,
"content": dataurl,
"decode": True})
_file.save()
# update values
doc.set(fieldname, _file.file_url)
doc.save(ignore_permissions = True)
if files_to_delete:
for f in files_to_delete:
if f:
remove_file_by_url(f, doctype=doc.doctype, name=doc.name)
frappe.flags.web_form_doc = doc
if for_payment:
return web_form.get_payment_gateway_url(doc)
else:
return doc
@frappe.whitelist()
def delete(web_form_name, docname):
web_form = frappe.get_doc("Web Form", web_form_name)
owner = frappe.db.get_value(web_form.doc_type, docname, "owner")
if frappe.session.user == owner and web_form.allow_delete:
frappe.delete_doc(web_form.doc_type, docname, ignore_permissions=True)
else:
raise frappe.PermissionError("Not Allowed")
@frappe.whitelist()
def delete_multiple(web_form_name, docnames):
web_form = frappe.get_doc("Web Form", web_form_name)
docnames = json.loads(docnames)
allowed_docnames = []
restricted_docnames = []
for docname in docnames:
owner = frappe.db.get_value(web_form.doc_type, docname, "owner")
if frappe.session.user == owner and web_form.allow_delete:
allowed_docnames.append(docname)
else:
restricted_docnames.append(docname)
for docname in allowed_docnames:
frappe.delete_doc(web_form.doc_type, docname, ignore_permissions=True)
if restricted_docnames:
raise frappe.PermissionError("You do not have permisssion to delete " + ", ".join(restricted_docnames))
def check_webform_perm(doctype, name):
doc = frappe.get_doc(doctype, name)
if hasattr(doc, "has_webform_permission"):
if doc.has_webform_permission():
return True
@frappe.whitelist(allow_guest=True)
def get_web_form_filters(web_form_name):
web_form = frappe.get_doc("Web Form", web_form_name)
return [field for field in web_form.web_form_fields if field.show_in_filter]
def make_route_string(parameters):
route_string = ""
delimeter = '?'
if isinstance(parameters, dict):
for key in parameters:
if key != "web_form_name":
route_string += delimeter + key + "=" + cstr(parameters[key])
delimeter = '&'
return (route_string, delimeter)
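# Example (sketch): the web_form_name key is skipped and the remaining
# parameters are joined into a query string.
#
#   make_route_string({'web_form_name': 'tasks', 'new': '1'})
#   # -> ('?new=1', '&')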
@frappe.whitelist(allow_guest=True)
def get_form_data(doctype, docname=None, web_form_name=None):
web_form = frappe.get_doc('Web Form', web_form_name)
if web_form.login_required and frappe.session.user == 'Guest':
frappe.throw(_("Not Permitted"), frappe.PermissionError)
out = frappe._dict()
out.web_form = web_form
if frappe.session.user != 'Guest' and not docname and not web_form.allow_multiple:
docname = frappe.db.get_value(doctype, {"owner": frappe.session.user}, "name")
if docname:
doc = frappe.get_doc(doctype, docname)
if web_form.has_web_form_permission(doctype, docname, ptype='read'):
out.doc = doc
else:
frappe.throw(_("Not permitted"), frappe.PermissionError)
# For Table fields, server-side processing for meta
for field in out.web_form.web_form_fields:
if field.fieldtype == "Table":
field.fields = get_in_list_view_fields(field.options)
out.update({field.fieldname: field.fields})
if field.fieldtype == "Link":
field.fieldtype = "Autocomplete"
field.options = get_link_options(
web_form_name,
field.options,
field.allow_read_on_all_link_options
)
return out
@frappe.whitelist()
def get_in_list_view_fields(doctype):
meta = frappe.get_meta(doctype)
fields = []
if meta.title_field:
fields.append(meta.title_field)
else:
fields.append('name')
if meta.has_field('status'):
fields.append('status')
fields += [df.fieldname for df in meta.fields if df.in_list_view and df.fieldname not in fields]
def get_field_df(fieldname):
if fieldname == 'name':
return { 'label': 'Name', 'fieldname': 'name', 'fieldtype': 'Data' }
return meta.get_field(fieldname).as_dict()
return [get_field_df(f) for f in fields]
@frappe.whitelist(allow_guest=True)
def get_link_options(web_form_name, doctype, allow_read_on_all_link_options=False):
web_form_doc = frappe.get_doc("Web Form", web_form_name)
doctype_validated = False
limited_to_user = False
if web_form_doc.login_required:
# check if frappe session user is not guest or admin
if frappe.session.user != 'Guest':
doctype_validated = True
if not allow_read_on_all_link_options:
limited_to_user = True
else:
for field in web_form_doc.web_form_fields:
if field.options == doctype:
doctype_validated = True
break
if doctype_validated:
link_options = []
if limited_to_user:
link_options = "\n".join([doc.name for doc in frappe.get_all(doctype, filters = {"owner":frappe.session.user})])
else:
link_options = "\n".join([doc.name for doc in frappe.get_all(doctype)])
return link_options
else:
raise frappe.PermissionError('Not Allowed, {0}'.format(doctype))
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
from functools import partial
import re
import warnings
from ._compat import range_type, text_type, PY2
from . import err
#: Regular expression for :meth:`Cursor.executemany`.
#: executemany only supports simple bulk insert.
#: You can use it to load large datasets.
RE_INSERT_VALUES = re.compile(
r"\s*((?:INSERT|REPLACE)\s.+\sVALUES?\s+)" +
r"(\(\s*(?:%s|%\(.+\)s)\s*(?:,\s*(?:%s|%\(.+\)s)\s*)*\))" +
r"(\s*(?:ON DUPLICATE.*)?)\Z",
re.IGNORECASE | re.DOTALL)
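# Illustrative match (sketch): for a query such as
#   "INSERT INTO t (a, b) VALUES (%s, %s) ON DUPLICATE KEY UPDATE a=a"
# group(1) is the prefix up to and including "VALUES ", group(2) is the single
# values template "(%s, %s)", and group(3) is the trailing "ON DUPLICATE ..."
# clause; executemany() below repeats group(2) once per row.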
class Cursor(object):
"""
This is the object you use to interact with the database.
"""
#: Max statement size which :meth:`executemany` generates.
#:
#: Max size of allowed statement is max_allowed_packet - packet_header_size.
#: Default value of max_allowed_packet is 1048576.
max_stmt_length = 1024000
_defer_warnings = False
def __init__(self, connection):
"""
Do not create an instance of a Cursor yourself. Call
connections.Connection.cursor().
"""
self.connection = connection
self.description = None
self.rownumber = 0
self.rowcount = -1
self.arraysize = 1
self._executed = None
self._result = None
self._rows = None
self._warnings_handled = False
def close(self):
"""
Closing a cursor just exhausts all remaining data.
"""
conn = self.connection
if conn is None:
return
try:
while self.nextset():
pass
finally:
self.connection = None
def __enter__(self):
return self
def __exit__(self, *exc_info):
del exc_info
self.close()
def _get_db(self):
if not self.connection:
raise err.ProgrammingError("Cursor closed")
return self.connection
def _check_executed(self):
if not self._executed:
raise err.ProgrammingError("execute() first")
def _conv_row(self, row):
return row
def setinputsizes(self, *args):
"""Does nothing, required by DB API."""
def setoutputsizes(self, *args):
"""Does nothing, required by DB API."""
def _nextset(self, unbuffered=False):
"""Get the next query set"""
conn = self._get_db()
current_result = self._result
# for unbuffered queries warnings are only available once whole result has been read
if unbuffered:
self._show_warnings()
if current_result is None or current_result is not conn._result:
return None
if not current_result.has_next:
return None
conn.next_result(unbuffered=unbuffered)
self._do_get_result()
return True
def nextset(self):
return self._nextset(False)
def _ensure_bytes(self, x, encoding=None):
if isinstance(x, text_type):
x = x.encode(encoding)
elif isinstance(x, (tuple, list)):
x = type(x)(self._ensure_bytes(v, encoding=encoding) for v in x)
return x
def _escape_args(self, args, conn):
ensure_bytes = partial(self._ensure_bytes, encoding=conn.encoding)
if isinstance(args, (tuple, list)):
if PY2:
args = tuple(map(ensure_bytes, args))
return tuple(conn.literal(arg) for arg in args)
elif isinstance(args, dict):
if PY2:
args = dict((ensure_bytes(key), ensure_bytes(val)) for
(key, val) in args.items())
return dict((key, conn.literal(val)) for (key, val) in args.items())
else:
# If it's not a dictionary let's try escaping it anyways.
# Worst case it will throw a ValueError
if PY2:
args = ensure_bytes(args)
return conn.escape(args)
def mogrify(self, query, args=None):
"""
Returns the exact string that is sent to the database by calling the
execute() method.
This method follows the extension to the DB API 2.0 followed by Psycopg.
"""
conn = self._get_db()
if PY2: # Use bytes on Python 2 always
query = self._ensure_bytes(query, encoding=conn.encoding)
if args is not None:
query = query % self._escape_args(args, conn)
return query
def execute(self, query, args=None):
"""Execute a query
:param str query: Query to execute.
:param args: parameters used with query. (optional)
:type args: tuple, list or dict
:return: Number of affected rows
:rtype: int
If args is a list or tuple, %s can be used as a placeholder in the query.
If args is a dict, %(name)s can be used as a placeholder in the query.
"""
while self.nextset():
pass
query = self.mogrify(query, args)
result = self._query(query)
self._executed = query
return result
def executemany(self, query, args):
# type: (str, list) -> int
"""Run several data against one query
:param query: query to execute on server
:param args: Sequence of sequences or mappings, used as the parameters.
:return: Number of rows affected, if any.
This method improves performance on multiple-row INSERT and
REPLACE. Otherwise it is equivalent to looping over args with
execute().
"""
if not args:
return
m = RE_INSERT_VALUES.match(query)
if m:
q_prefix = m.group(1) % ()
q_values = m.group(2).rstrip()
q_postfix = m.group(3) or ''
assert q_values[0] == '(' and q_values[-1] == ')'
return self._do_execute_many(q_prefix, q_values, q_postfix, args,
self.max_stmt_length,
self._get_db().encoding)
self.rowcount = sum(self.execute(query, arg) for arg in args)
return self.rowcount
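# Usage sketch (illustrative; table and column names are placeholders):
#
#   cur.executemany(
#       "INSERT INTO logs (level, msg) VALUES (%s, %s)",
#       [("INFO", "started"), ("WARN", "low disk")])
#
# Because the statement matches RE_INSERT_VALUES, the rows are folded into a
# single multi-row INSERT (split at max_stmt_length) instead of one round
# trip per row.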
def _do_execute_many(self, prefix, values, postfix, args, max_stmt_length, encoding):
conn = self._get_db()
escape = self._escape_args
if isinstance(prefix, text_type):
prefix = prefix.encode(encoding)
if PY2 and isinstance(values, text_type):
values = values.encode(encoding)
if isinstance(postfix, text_type):
postfix = postfix.encode(encoding)
sql = bytearray(prefix)
args = iter(args)
v = values % escape(next(args), conn)
if isinstance(v, text_type):
if PY2:
v = v.encode(encoding)
else:
v = v.encode(encoding, 'surrogateescape')
sql += v
rows = 0
for arg in args:
v = values % escape(arg, conn)
if isinstance(v, text_type):
if PY2:
v = v.encode(encoding)
else:
v = v.encode(encoding, 'surrogateescape')
if len(sql) + len(v) + len(postfix) + 1 > max_stmt_length:
rows += self.execute(sql + postfix)
sql = bytearray(prefix)
else:
sql += b','
sql += v
rows += self.execute(sql + postfix)
self.rowcount = rows
return rows
def callproc(self, procname, args=()):
"""Execute stored procedure procname with args
procname -- string, name of procedure to execute on server
args -- Sequence of parameters to use with procedure
Returns the original args.
Compatibility warning: PEP-249 specifies that any modified
parameters must be returned. This is currently impossible
as they are only available by storing them in a server
variable and then retrieved by a query. Since stored
procedures return zero or more result sets, there is no
reliable way to get at OUT or INOUT parameters via callproc.
The server variables are named @_procname_n, where procname
is the parameter above and n is the position of the parameter
(from zero). Once all result sets generated by the procedure
have been fetched, you can issue a SELECT @_procname_0, ...
query using .execute() to get any OUT or INOUT values.
Compatibility warning: The act of calling a stored procedure
itself creates an empty result set. This appears after any
result sets generated by the procedure. This is non-standard
behavior with respect to the DB-API. Be sure to use nextset()
to advance through all result sets; otherwise you may get
disconnected.
"""
conn = self._get_db()
for index, arg in enumerate(args):
q = "SET @_%s_%d=%s" % (procname, index, conn.escape(arg))
self._query(q)
self.nextset()
q = "CALL %s(%s)" % (procname,
','.join(['@_%s_%d' % (procname, i)
for i in range_type(len(args))]))
self._query(q)
self._executed = q
return args
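# Usage sketch (procedure name is a placeholder): as the docstring above
# notes, OUT/INOUT values must be read back explicitly once every result set
# generated by the procedure has been fetched.
#
#   cur.callproc('my_proc', (5, 0))
#   # ... fetch any result sets, advancing with cur.nextset() ...
#   cur.execute("SELECT @_my_proc_0, @_my_proc_1")
#   cur.fetchone()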
def fetchone(self):
"""Fetch the next row"""
self._check_executed()
if self._rows is None or self.rownumber >= len(self._rows):
return None
result = self._rows[self.rownumber]
self.rownumber += 1
return result
def fetchmany(self, size=None):
"""Fetch several rows"""
self._check_executed()
if self._rows is None:
return ()
end = self.rownumber + (size or self.arraysize)
result = self._rows[self.rownumber:end]
self.rownumber = min(end, len(self._rows))
return result
def fetchall(self):
"""Fetch all the rows"""
self._check_executed()
if self._rows is None:
return ()
if self.rownumber:
result = self._rows[self.rownumber:]
else:
result = self._rows
self.rownumber = len(self._rows)
return result
def scroll(self, value, mode='relative'):
self._check_executed()
if mode == 'relative':
r = self.rownumber + value
elif mode == 'absolute':
r = value
else:
raise err.ProgrammingError("unknown scroll mode %s" % mode)
if not (0 <= r < len(self._rows)):
raise IndexError("out of range")
self.rownumber = r
def _query(self, q):
conn = self._get_db()
self._last_executed = q
conn.query(q)
self._do_get_result()
return self.rowcount
def _do_get_result(self):
conn = self._get_db()
self.rownumber = 0
self._result = result = conn._result
self.rowcount = result.affected_rows
self.description = result.description
self.lastrowid = result.insert_id
self._rows = result.rows
self._warnings_handled = False
if not self._defer_warnings:
self._show_warnings()
def _show_warnings(self):
if self._warnings_handled:
return
self._warnings_handled = True
if self._result and (self._result.has_next or not self._result.warning_count):
return
ws = self._get_db().show_warnings()
if ws is None:
return
for w in ws:
msg = w[-1]
if PY2:
if isinstance(msg, unicode):
msg = msg.encode('utf-8', 'replace')
warnings.warn(err.Warning(*w[1:3]), stacklevel=4)
def __iter__(self):
return iter(self.fetchone, None)
Warning = err.Warning
Error = err.Error
InterfaceError = err.InterfaceError
DatabaseError = err.DatabaseError
DataError = err.DataError
OperationalError = err.OperationalError
IntegrityError = err.IntegrityError
InternalError = err.InternalError
ProgrammingError = err.ProgrammingError
NotSupportedError = err.NotSupportedError
class DictCursorMixin(object):
# You can override this to use OrderedDict or other dict-like types.
dict_type = dict
def _do_get_result(self):
super(DictCursorMixin, self)._do_get_result()
fields = []
if self.description:
for f in self._result.fields:
name = f.name
if name in fields:
name = f.table_name + '.' + name
fields.append(name)
self._fields = fields
if fields and self._rows:
self._rows = [self._conv_row(r) for r in self._rows]
def _conv_row(self, row):
if row is None:
return None
return self.dict_type(zip(self._fields, row))
class DictCursor(DictCursorMixin, Cursor):
"""A cursor which returns results as a dictionary"""
class SSCursor(Cursor):
"""
Unbuffered Cursor, mainly useful for queries that return a lot of data,
or for connections to remote servers over a slow network.
Instead of copying every row of data into a buffer, this will fetch
rows as needed. The upside of this is the client uses much less memory,
and rows are returned much faster when traveling over a slow network
or if the result set is very big.
There are limitations, though. The MySQL protocol doesn't support
returning the total number of rows, so the only way to tell how many rows
there are is to iterate over every row returned. Also, it currently isn't
possible to scroll backwards, as only the current row is held in memory.
"""
_defer_warnings = True
def _conv_row(self, row):
return row
def close(self):
conn = self.connection
if conn is None:
return
if self._result is not None and self._result is conn._result:
self._result._finish_unbuffered_query()
try:
while self.nextset():
pass
finally:
self.connection = None
def _query(self, q):
conn = self._get_db()
self._last_executed = q
conn.query(q, unbuffered=True)
self._do_get_result()
return self.rowcount
def nextset(self):
return self._nextset(unbuffered=True)
def read_next(self):
"""Read next row"""
return self._conv_row(self._result._read_rowdata_packet_unbuffered())
def fetchone(self):
"""Fetch next row"""
self._check_executed()
row = self.read_next()
if row is None:
self._show_warnings()
return None
self.rownumber += 1
return row
def fetchall(self):
"""
Fetch all, as per MySQLdb. Pretty useless for large queries, as
it is buffered. See fetchall_unbuffered(), if you want an unbuffered
generator version of this method.
"""
return list(self.fetchall_unbuffered())
def fetchall_unbuffered(self):
"""
Fetch all, implemented as a generator, which isn't standard,
however, it doesn't make sense to return everything in a list, as that
would use ridiculous memory for large result sets.
"""
return iter(self.fetchone, None)
def __iter__(self):
return self.fetchall_unbuffered()
def fetchmany(self, size=None):
"""Fetch many"""
self._check_executed()
if size is None:
size = self.arraysize
rows = []
for i in range_type(size):
row = self.read_next()
if row is None:
self._show_warnings()
break
rows.append(row)
self.rownumber += 1
return rows
def scroll(self, value, mode='relative'):
self._check_executed()
if mode == 'relative':
if value < 0:
raise err.NotSupportedError(
"Backwards scrolling not supported by this cursor")
for _ in range_type(value):
self.read_next()
self.rownumber += value
elif mode == 'absolute':
if value < self.rownumber:
raise err.NotSupportedError(
"Backwards scrolling not supported by this cursor")
end = value - self.rownumber
for _ in range_type(end):
self.read_next()
self.rownumber = value
else:
raise err.ProgrammingError("unknown scroll mode %s" % mode)
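# Usage sketch (illustrative): stream a large result set row by row instead
# of buffering it client side. Assumes `conn` is an open pymysql connection
# and `process` is a caller-supplied placeholder.
#
#   with conn.cursor(SSCursor) as cur:
#       cur.execute("SELECT * FROM big_table")
#       for row in cur:            # iterates via fetchall_unbuffered()
#           process(row)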
class SSDictCursor(DictCursorMixin, SSCursor):
"""An unbuffered cursor, which returns results as a dictionary"""
|
|
"""
PRE-PROCESSORS
=============================================================================
Preprocessors work on source text before we start doing anything too
complicated.
"""
import re
import markdown
HTML_PLACEHOLDER_PREFIX = markdown.STX+"wzxhzdk:"
HTML_PLACEHOLDER = HTML_PLACEHOLDER_PREFIX + "%d" + markdown.ETX
class Processor:
def __init__(self, markdown_instance=None):
if markdown_instance:
self.markdown = markdown_instance
class Preprocessor (Processor):
"""
Preprocessors are run after the text is broken into lines.
Each preprocessor implements a "run" method that takes a pointer to a
list of lines of the document, modifies it as necessary and returns
either the same pointer or a pointer to a new list.
Preprocessors must extend markdown.Preprocessor.
"""
def run(self, lines):
"""
Each subclass of Preprocessor should override the `run` method, which
takes the document as a list of strings split by newlines and returns
the (possibly modified) list of lines.
"""
pass
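# Hedged example (not part of the original module): a minimal Preprocessor
# subclass honouring the run() contract described above -- it takes the list
# of lines and returns a (possibly modified) list of lines.
class _CommentStrippingPreprocessor(Preprocessor):
    """Drop source lines that start with '//' before any other processing."""
    def run(self, lines):
        return [line for line in lines if not line.startswith("//")]
# e.g. _CommentStrippingPreprocessor().run(["a", "// hidden", "b"])
# returns ["a", "b"]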
class HtmlStash:
"""
This class is used for stashing HTML objects that we extract
in the beginning and replace with place-holders.
"""
def __init__ (self):
""" Create a HtmlStash. """
self.html_counter = 0 # for counting inline html segments
self.rawHtmlBlocks=[]
def store(self, html, safe=False):
"""
Saves an HTML segment for later reinsertion. Returns a
placeholder string that needs to be inserted into the
document.
Keyword arguments:
* html: an html segment
* safe: label an html segment as safe for safemode
Returns : a placeholder string
"""
self.rawHtmlBlocks.append((html, safe))
placeholder = HTML_PLACEHOLDER % self.html_counter
self.html_counter += 1
return placeholder
def reset(self):
self.html_counter = 0
self.rawHtmlBlocks = []
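# Hedged example (not part of the original module): stashing a raw HTML block
# and receiving the HTML_PLACEHOLDER string that stands in for it.
def _example_html_stash():
    stash = HtmlStash()
    placeholder = stash.store("<div>raw html</div>")
    # placeholder == markdown.STX + "wzxhzdk:0" + markdown.ETX, and the
    # original block is kept in stash.rawHtmlBlocks[0] until reinsertion.
    return placeholder, stash.rawHtmlBlocks[0]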
class HtmlBlockPreprocessor(Preprocessor):
"""Remove html blocks from the text and store them for later retrieval."""
right_tag_patterns = ["</%s>", "%s>"]
attrs_pattern = r"""
\s+(?P<attr>[^>"'/= ]+)=(?P<q>['"])(?P<value>.*?)(?P=q) # attr="value"
| # OR
\s+(?P<attr1>[^>"'/= ]+)=(?P<value1>[^> ]+) # attr=value
| # OR
\s+(?P<attr2>[^>"'/= ]+) # attr
"""
left_tag_pattern = r'^\<(?P<tag>[^> ]+)(?P<attrs>(%s)*)\s*\/?\>?' % attrs_pattern
attrs_re = re.compile(attrs_pattern, re.VERBOSE)
left_tag_re = re.compile(left_tag_pattern, re.VERBOSE)
markdown_in_raw = False
def _get_left_tag(self, block):
m = self.left_tag_re.match(block)
if m:
tag = m.group('tag')
raw_attrs = m.group('attrs')
attrs = {}
if raw_attrs:
for ma in self.attrs_re.finditer(raw_attrs):
if ma.group('attr'):
if ma.group('value'):
attrs[ma.group('attr').strip()] = ma.group('value')
else:
attrs[ma.group('attr').strip()] = ""
elif ma.group('attr1'):
if ma.group('value1'):
attrs[ma.group('attr1').strip()] = ma.group('value1')
else:
attrs[ma.group('attr1').strip()] = ""
elif ma.group('attr2'):
attrs[ma.group('attr2').strip()] = ""
return tag, len(m.group(0)), attrs
else:
tag = block[1:].replace(">", " ", 1).split()[0].lower()
            return tag, len(tag) + 2, {}
def _get_right_tag(self, left_tag, left_index, block):
for p in self.right_tag_patterns:
tag = p % left_tag
i = block.rfind(tag)
if i > 2:
return tag.lstrip("<").rstrip(">"), i + len(p)-2 + left_index-2
return block.rstrip()[-left_index:-1].lower(), len(block)
def _equal_tags(self, left_tag, right_tag):
if left_tag[0] in ['?', '@', '%']: # handle PHP, etc.
return True
if ("/" + left_tag) == right_tag:
return True
if (right_tag == "--" and left_tag == "--"):
return True
elif left_tag == right_tag[1:] \
and right_tag[0] != "<":
return True
else:
return False
def _is_oneliner(self, tag):
return (tag in ['hr', 'hr/'])
def run(self, lines):
text = "\n".join(lines)
new_blocks = []
text = text.split("\n\n")
items = []
left_tag = ''
right_tag = ''
in_tag = False # flag
while text:
block = text[0]
if block.startswith("\n"):
block = block[1:]
text = text[1:]
if block.startswith("\n"):
block = block[1:]
if not in_tag:
if block.startswith("<"):
left_tag, left_index, attrs = self._get_left_tag(block)
right_tag, data_index = self._get_right_tag(left_tag,
left_index,
block)
if block[1] == "!":
# is a comment block
left_tag = "--"
right_tag, data_index = self._get_right_tag(left_tag,
left_index,
block)
# keep checking conditions below and maybe just append
if data_index < len(block) \
and markdown.isBlockLevel(left_tag):
text.insert(0, block[data_index:])
block = block[:data_index]
if not (markdown.isBlockLevel(left_tag) \
or block[1] in ["!", "?", "@", "%"]):
new_blocks.append(block)
continue
if self._is_oneliner(left_tag):
new_blocks.append(block.strip())
continue
if block.rstrip().endswith(">") \
and self._equal_tags(left_tag, right_tag):
if self.markdown_in_raw and 'markdown' in attrs.keys():
start = re.sub(r'\smarkdown(=[\'"]?[^> ]*[\'"]?)?',
'', block[:left_index])
end = block[-len(right_tag)-2:]
block = block[left_index:-len(right_tag)-2]
new_blocks.append(
self.markdown.htmlStash.store(start))
new_blocks.append(block)
new_blocks.append(
self.markdown.htmlStash.store(end))
else:
new_blocks.append(
self.markdown.htmlStash.store(block.strip()))
continue
else:
# if is block level tag and is not complete
if markdown.isBlockLevel(left_tag) or left_tag == "--" \
and not block.rstrip().endswith(">"):
items.append(block.strip())
in_tag = True
else:
new_blocks.append(
self.markdown.htmlStash.store(block.strip()))
continue
new_blocks.append(block)
else:
items.append(block)
right_tag, data_index = self._get_right_tag(left_tag,
left_index,
block)
if self._equal_tags(left_tag, right_tag):
# if find closing tag
in_tag = False
if self.markdown_in_raw and 'markdown' in attrs.keys():
start = re.sub(r'\smarkdown(=[\'"]?[^> ]*[\'"]?)?',
'', items[0][:left_index])
items[0] = items[0][left_index:]
end = items[-1][-len(right_tag)-2:]
items[-1] = items[-1][:-len(right_tag)-2]
new_blocks.append(
self.markdown.htmlStash.store(start))
new_blocks.extend(items)
new_blocks.append(
self.markdown.htmlStash.store(end))
else:
new_blocks.append(
self.markdown.htmlStash.store('\n\n'.join(items)))
items = []
if items:
if self.markdown_in_raw and 'markdown' in attrs.keys():
start = re.sub(r'\smarkdown(=[\'"]?[^> ]*[\'"]?)?',
'', items[0][:left_index])
items[0] = items[0][left_index:]
end = items[-1][-len(right_tag)-2:]
items[-1] = items[-1][:-len(right_tag)-2]
new_blocks.append(
self.markdown.htmlStash.store(start))
new_blocks.extend(items)
new_blocks.append(
self.markdown.htmlStash.store(end))
else:
new_blocks.append(
self.markdown.htmlStash.store('\n\n'.join(items)))
#new_blocks.append(self.markdown.htmlStash.store('\n\n'.join(items)))
new_blocks.append('\n')
new_text = "\n\n".join(new_blocks)
return new_text.split("\n")
class ReferencePreprocessor(Preprocessor):
""" Remove reference definitions from text and store for later use. """
RE = re.compile(r'^(\ ?\ ?\ ?)\[([^\]]*)\]:\s*([^ ]*)(.*)$', re.DOTALL)
def run (self, lines):
new_text = [];
for line in lines:
m = self.RE.match(line)
if m:
id = m.group(2).strip().lower()
t = m.group(4).strip() # potential title
if not t:
self.markdown.references[id] = (m.group(3), t)
elif (len(t) >= 2
and (t[0] == t[-1] == "\""
or t[0] == t[-1] == "\'"
or (t[0] == "(" and t[-1] == ")") ) ):
self.markdown.references[id] = (m.group(3), t[1:-1])
else:
new_text.append(line)
else:
new_text.append(line)
return new_text #+ "\n"
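# Hedged example (not part of the original module): a reference definition
# line is removed from the text and recorded on the Markdown instance.
def _example_reference_preprocessor():
    md = markdown.Markdown()
    pre = ReferencePreprocessor(md)
    remaining = pre.run(['[id]: http://example.com/ "Optional Title"',
                         'Body text is left untouched.'])
    # md.references == {'id': ('http://example.com/', 'Optional Title')}
    # remaining == ['Body text is left untouched.']
    return remaining, md.references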
|
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class CipherBackend(object):
@abc.abstractmethod
def cipher_supported(self, cipher, mode):
"""
Return True if the given cipher and mode are supported.
"""
@abc.abstractmethod
def create_symmetric_encryption_ctx(self, cipher, mode):
"""
Get a CipherContext that can be used for encryption.
"""
@abc.abstractmethod
def create_symmetric_decryption_ctx(self, cipher, mode):
"""
Get a CipherContext that can be used for decryption.
"""
@six.add_metaclass(abc.ABCMeta)
class HashBackend(object):
@abc.abstractmethod
def hash_supported(self, algorithm):
"""
Return True if the hash algorithm is supported by this backend.
"""
@abc.abstractmethod
def create_hash_ctx(self, algorithm):
"""
Create a HashContext for calculating a message digest.
"""
@six.add_metaclass(abc.ABCMeta)
class HMACBackend(object):
@abc.abstractmethod
def hmac_supported(self, algorithm):
"""
Return True if the hash algorithm is supported for HMAC by this
backend.
"""
@abc.abstractmethod
def create_hmac_ctx(self, key, algorithm):
"""
Create a MACContext for calculating a message authentication code.
"""
@six.add_metaclass(abc.ABCMeta)
class CMACBackend(object):
@abc.abstractmethod
def cmac_algorithm_supported(self, algorithm):
"""
Returns True if the block cipher is supported for CMAC by this backend
"""
@abc.abstractmethod
def create_cmac_ctx(self, algorithm):
"""
Create a MACContext for calculating a message authentication code.
"""
@six.add_metaclass(abc.ABCMeta)
class PBKDF2HMACBackend(object):
@abc.abstractmethod
def pbkdf2_hmac_supported(self, algorithm):
"""
Return True if the hash algorithm is supported for PBKDF2 by this
backend.
"""
@abc.abstractmethod
def derive_pbkdf2_hmac(self, algorithm, length, salt, iterations,
key_material):
"""
Return length bytes derived from provided PBKDF2 parameters.
"""
@six.add_metaclass(abc.ABCMeta)
class RSABackend(object):
@abc.abstractmethod
def generate_rsa_private_key(self, public_exponent, key_size):
"""
Generate an RSAPrivateKey instance with public_exponent and a modulus
of key_size bits.
"""
@abc.abstractmethod
def rsa_padding_supported(self, padding):
"""
Returns True if the backend supports the given padding options.
"""
@abc.abstractmethod
def generate_rsa_parameters_supported(self, public_exponent, key_size):
"""
Returns True if the backend supports the given parameters for key
generation.
"""
@abc.abstractmethod
def load_rsa_private_numbers(self, numbers):
"""
Returns an RSAPrivateKey provider.
"""
@abc.abstractmethod
def load_rsa_public_numbers(self, numbers):
"""
Returns an RSAPublicKey provider.
"""
@six.add_metaclass(abc.ABCMeta)
class DSABackend(object):
@abc.abstractmethod
def generate_dsa_parameters(self, key_size):
"""
Generate a DSAParameters instance with a modulus of key_size bits.
"""
@abc.abstractmethod
def generate_dsa_private_key(self, parameters):
"""
Generate a DSAPrivateKey instance with parameters as a DSAParameters
object.
"""
@abc.abstractmethod
def generate_dsa_private_key_and_parameters(self, key_size):
"""
Generate a DSAPrivateKey instance using key size only.
"""
@abc.abstractmethod
def dsa_hash_supported(self, algorithm):
"""
Return True if the hash algorithm is supported by the backend for DSA.
"""
@abc.abstractmethod
def dsa_parameters_supported(self, p, q, g):
"""
Return True if the parameters are supported by the backend for DSA.
"""
@abc.abstractmethod
def load_dsa_private_numbers(self, numbers):
"""
Returns a DSAPrivateKey provider.
"""
@abc.abstractmethod
def load_dsa_public_numbers(self, numbers):
"""
Returns a DSAPublicKey provider.
"""
@abc.abstractmethod
def load_dsa_parameter_numbers(self, numbers):
"""
Returns a DSAParameters provider.
"""
@six.add_metaclass(abc.ABCMeta)
class EllipticCurveBackend(object):
@abc.abstractmethod
def elliptic_curve_signature_algorithm_supported(
self, signature_algorithm, curve
):
"""
Returns True if the backend supports the named elliptic curve with the
specified signature algorithm.
"""
@abc.abstractmethod
def elliptic_curve_supported(self, curve):
"""
Returns True if the backend supports the named elliptic curve.
"""
@abc.abstractmethod
def generate_elliptic_curve_private_key(self, curve):
"""
Return an object conforming to the EllipticCurvePrivateKey interface.
"""
@abc.abstractmethod
def load_elliptic_curve_public_numbers(self, numbers):
"""
Return an EllipticCurvePublicKey provider using the given numbers.
"""
@abc.abstractmethod
def load_elliptic_curve_private_numbers(self, numbers):
"""
Return an EllipticCurvePrivateKey provider using the given numbers.
"""
@abc.abstractmethod
def elliptic_curve_exchange_algorithm_supported(self, algorithm, curve):
"""
Returns whether the exchange algorithm is supported by this backend.
"""
@six.add_metaclass(abc.ABCMeta)
class PEMSerializationBackend(object):
@abc.abstractmethod
def load_pem_private_key(self, data, password):
"""
Loads a private key from PEM encoded data, using the provided password
if the data is encrypted.
"""
@abc.abstractmethod
def load_pem_public_key(self, data):
"""
Loads a public key from PEM encoded data.
"""
@six.add_metaclass(abc.ABCMeta)
class DERSerializationBackend(object):
@abc.abstractmethod
def load_der_private_key(self, data, password):
"""
Loads a private key from DER encoded data. Uses the provided password
if the data is encrypted.
"""
@abc.abstractmethod
def load_der_public_key(self, data):
"""
Loads a public key from DER encoded data.
"""
@six.add_metaclass(abc.ABCMeta)
class X509Backend(object):
@abc.abstractmethod
def load_pem_x509_certificate(self, data):
"""
Load an X.509 certificate from PEM encoded data.
"""
@abc.abstractmethod
def load_der_x509_certificate(self, data):
"""
Load an X.509 certificate from DER encoded data.
"""
@abc.abstractmethod
def load_der_x509_csr(self, data):
"""
Load an X.509 CSR from DER encoded data.
"""
@abc.abstractmethod
def load_pem_x509_csr(self, data):
"""
Load an X.509 CSR from PEM encoded data.
"""
@abc.abstractmethod
def create_x509_csr(self, builder, private_key, algorithm):
"""
Create and sign an X.509 CSR from a CSR builder object.
"""
@abc.abstractmethod
def create_x509_certificate(self, builder, private_key, algorithm):
"""
Create and sign an X.509 certificate from a CertificateBuilder object.
"""
@abc.abstractmethod
def create_x509_crl(self, builder, private_key, algorithm):
"""
Create and sign an X.509 CertificateRevocationList from a
CertificateRevocationListBuilder object.
"""
@abc.abstractmethod
def create_x509_revoked_certificate(self, builder):
"""
Create a RevokedCertificate object from a RevokedCertificateBuilder
object.
"""
@six.add_metaclass(abc.ABCMeta)
class DHBackend(object):
@abc.abstractmethod
def generate_dh_parameters(self, key_size):
"""
Generate a DHParameters instance with a modulus of key_size bits.
"""
@abc.abstractmethod
def generate_dh_private_key(self, parameters):
"""
Generate a DHPrivateKey instance with parameters as a DHParameters
object.
"""
@abc.abstractmethod
def generate_dh_private_key_and_parameters(self, key_size):
"""
Generate a DHPrivateKey instance using key size only.
"""
@abc.abstractmethod
def load_dh_private_numbers(self, numbers):
"""
Returns a DHPrivateKey provider.
"""
@abc.abstractmethod
def load_dh_public_numbers(self, numbers):
"""
Returns a DHPublicKey provider.
"""
@abc.abstractmethod
def load_dh_parameter_numbers(self, numbers):
"""
Returns a DHParameters provider.
"""
@abc.abstractmethod
def dh_exchange_algorithm_supported(self, exchange_algorithm):
"""
Returns whether the exchange algorithm is supported by this backend.
"""
@abc.abstractmethod
def dh_parameters_supported(self, p, g):
"""
Returns whether the backend supports DH with these parameter values.
"""
|
|
import os.path
import random
import time
import config
import options
from constants import NEWLINE_REPLACEMENT, SPACE_REPLACEMENT, VIRTUAL_TIME_INTERVAL
from definitions import Style
from lib.log import debug, info, warning
from lib.msgs import insert_silences, nb2msg
from paths import TMP_PATH
import res
import version
DEBUG_MODE = version.IS_DEV_VERSION
class _State(object):
def send_menu(self, client):
pass
class Anonymous(_State):
allowed_commands = ("login", )
class InTheLobby(_State):
allowed_commands = ("create", "register", "quit", "say")
def send_menu(self, client):
client.send_maps()
client.send_invitations()
client.push("update_menu\n")
class OrganizingAGame(_State):
allowed_commands = ("invite", "invite_easy", "invite_aggressive",
"move_to_alliance",
"start", "cancel_game", "say",
"faction")
def send_menu(self, client):
client.push("available_players %s\n" % " ".join(
[p.login for p in client.server.available_players(client)
if p not in client.game.guests]))
client.push("registered_players %s\n" % " ".join(["%s,%s,%s" % (p.login, p.alliance, p.faction) for p in client.game.players]))
client.push("update_menu\n")
class WaitingForTheGameToStart(_State):
allowed_commands = ("unregister", "say", "faction")
def send_menu(self, client):
client.push("registered_players %s\n" % " ".join(["%s,%s,%s" % (p.login, p.alliance, p.faction) for p in client.game.players]))
client.push("update_menu\n")
class Playing(_State):
allowed_commands = ("orders", "quit_game", "abort_game", "debug_info",
"say")
class _Computer(object):
login = "ai"
def __init__(self, level):
self.level = level
def login_to_send(self):
return self.login + "_" + self.level
def push(self, msg):
pass
def send_msg(self, l):
pass
def send_menu(self):
pass
def is_compatible(self, client):
return True
class Game(object):
started = False
speed = 1
def __init__(self, scenario, speed, server, admin, is_public=False):
self.id = server.get_next_id()
self.scenario = scenario
self.speed = speed
self.server = server
self.admin = admin
self.is_public = is_public
self.players = []
self.guests = []
self.register(admin)
if self.is_public:
self._process_public_game()
def _process_public_game(self):
for player in self.server.available_players():
if player.is_compatible(self.admin):
self.invite(player)
def notify_connection_of(self, client):
if self.is_public and self.can_register() and client.is_compatible(self.admin):
self.invite(client)
def _delay(self):
max_delay = max([p.delay for p in self.human_players])
if max_delay > .6:
info("max_delay=%s => max_delay=.6")
max_delay = .6
turn_duration = VIRTUAL_TIME_INTERVAL / 1000.0 / float(self.speed)
nb_turns = int(max_delay / turn_duration) + 1
info("max_delay=%s turn_duration=%s => %s buffered turns", max_delay,
turn_duration, nb_turns)
return nb_turns
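    # Worked example for _delay (illustrative, assuming VIRTUAL_TIME_INTERVAL
    # is 400 ms): at speed 1 a turn lasts 0.4 s, so a measured max_delay of
    # 0.3 s gives int(0.3 / 0.4) + 1 == 1 buffered turn, and the capped delay
    # of 0.6 s gives int(0.6 / 0.4) + 1 == 2 buffered turns.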
def _start(self):
if options.record_games:
self.f = open(os.path.join(TMP_PATH, "game%s-%s.txt" % (self.id, int(time.time()))), "w")
info("start game %s on map %s with players %s",
self.id,
self.scenario.get_name(),
" ".join([p.login_to_send() for p in self.players]))
self.guests = []
self.started = True
self.human_players = [p for p in self.players if not isinstance(p, _Computer)]
self.time = 0
random.seed()
seed = random.randint(0, 10000)
# init self.all_orders
self.all_orders = {}
for client in self.human_players:
self.all_orders[client] = []
# send first orders (if menu, the advance in the delay isn't lost)
delay = self._delay()
for client in self.human_players:
client.push("start_game %s %s %s %s\n" %
(";".join(["%s,%s,%s" % (p.login_to_send(), p.alliance,
p.faction)
for p in self.players]),
client.login,
seed,
self.speed,
)
)
for _ in range(delay):
self.orders(client, ["update" + NEWLINE_REPLACEMENT, None])
client.state = Playing()
if options.record_games:
self.f.write("start_game %s %s %s\n" %
(";".join(["%s,%s" % (p.login_to_send(), p.alliance)
for p in self.players]),
seed,
self.scenario.get_name(),
)
)
self.players = [] # remove the players from the registered players list
self.server.log_status()
def start(self):
if self.scenario.nb_players_min <= len(self.players) <= self.scenario.nb_players_max:
self._start()
else:
debug("couldn't start game: bad number of players")
def quit_game(self, client): # called by a client already out of the game interface
info("%s has quit from game %s after %s turns", client.login, self.id, self.time)
self.human_players.remove(client)
client.state = InTheLobby()
if self.human_players:
# remove the queue, and update the orders
del self.all_orders[client]
self._dispatch_orders_if_needed()
else:
self.close()
def abort_game(self, client): # called by a client already out of the game interface
info("%s has disconnected from game %s after %s turns", client.login, self.id, self.time)
self.human_players.remove(client)
client.state = InTheLobby() # useful if the client just aborted a game but has not disconnected
if self.human_players:
# give the last order for the other players
for p in self.human_players:
self.all_orders[p].insert(0, ["update" + NEWLINE_REPLACEMENT, None])
self.all_orders[client].insert(0, ["quit" + NEWLINE_REPLACEMENT, None])
self._dispatch_orders_if_needed()
# remove the queue, and update the orders
del self.all_orders[client]
self._dispatch_orders_if_needed()
else:
self.close()
def get_nb_minutes(self):
return self.time * VIRTUAL_TIME_INTERVAL / 1000.0 / 60.0
def get_status_msg(self):
return [4018] + self.scenario.title + [9999]\
+ insert_silences([p.login for p in self.human_players]) + [9999]\
+ nb2msg(self.get_nb_minutes()) + [65]
def close(self):
info("closed game %s after %s turns (played for %s minutes)", self.id,
self.time, self.get_nb_minutes())
self.cancel()
self.server.log_status()
if options.record_games:
self.f.close()
_nb_allowed_alerts = 1
def _process_check_strings(self):
check_strings = [queue[0][1] for queue in self.all_orders.values()]
if check_strings.count(check_strings[0]) != len(check_strings) \
and self._nb_allowed_alerts > 0:
time_strings = [s.split("-", 1) for s in check_strings]
if time_strings.count(time_strings[0]) != len(time_strings):
if DEBUG_MODE:
info("minor mismatch in game %s at %s", self.id, self.time)
return
warning("mismatch in game %s at %s: %s",
self.id, self.time, check_strings)
self._nb_allowed_alerts -= 1
for p in self.human_players:
if not p.is_disconnected:
p.push("synchronization_error\n")
if "None" in check_strings:
warning("check string for game %s == 'None'", self.id)
def orders(self, client, args):
self.all_orders[client].append(args)
self._dispatch_orders_if_needed()
def _orders_are_ready(self):
for queue in self.all_orders.values():
if not queue:
return False
return True
def _dispatch_orders_if_needed(self):
debug("dispatch orders if needed")
while self._orders_are_ready():
log_this = False
debug(">> orders are ready")
self._process_check_strings()
# remove orders from the queue and pack them
_all_orders = []
for player, queue in self.all_orders.items():
orders = queue.pop(0)[0]
if SPACE_REPLACEMENT in orders:
log_this = True
_all_orders.append("%s/%s" % (player.login, orders))
all_orders = " ".join(_all_orders)
# send orders
for p in self.human_players:
if not p.is_disconnected:
debug("send all_orders to %s", p.login)
p.push("all_orders %s\n" % all_orders)
else:
debug("don't send all_orders to %s", p.login)
if log_this and options.record_games:
self.f.write("%s: all_orders %s\n" % (self.time, all_orders.replace(NEWLINE_REPLACEMENT, ";").replace(SPACE_REPLACEMENT, ",").replace("update;", "")))
self.time += 1
def invite(self, client):
self.guests.append(client)
client.send_msg([self.admin.login, 4243] + self.scenario.title)
def invite_computer(self, level):
if "admin_only" in self.server.parameters or \
len(self.players) > 1: # at least two human players if public server
self.register(_Computer(level))
else:
self.admin.send_msg([1029]) # hostile sound
def uninvite(self, client):
self.guests.remove(client)
def move_to_alliance(self, player_index, alliance):
player = self.players[int(player_index)]
player.alliance = int(alliance)
self.broadcast([4284, player.login, 4285] + nb2msg(player.alliance))
def set_faction(self, player_index, faction):
player = self.players[int(player_index)]
player.faction = faction
style = Style()
style.load(res.get_text_file("ui/style", append=True, localize=True))
faction_name = style.get(player.faction, 'title')
self.broadcast([player.login, ] + faction_name)
def broadcast(self, msg):
for client in self.players:
client.send_msg(msg)
def can_register(self):
return not self.started and len(self.players) < self.scenario.nb_players_max
def register(self, client):
if self.can_register():
            # find the first alliance number, starting at 1, that no
            # registered player is using yet
            for n in range(1, len(self.players) + 2):
                if n not in [p.alliance for p in self.players]:
                    break
self.players.append(client)
client.game = self
client.alliance = n
client.faction = "random_faction"
self.broadcast([client.login, 4241] + self.status())
def status(self):
assert not self.started
msg = nb2msg(len(self.players)) + [4242] + nb2msg(self.scenario.nb_players_max)
if len(self.players) >= self.scenario.nb_players_min:
msg += [4063]
else:
msg += [4244] + nb2msg(self.scenario.nb_players_min)
msg += [9999] + insert_silences([p.login for p in self.players])
return msg
def unregister(self, client):
self.players.remove(client)
if not client.is_disconnected:
client.push("quit\n")
client.state = InTheLobby()
def cancel(self):
for c in self.players[:]:
self.unregister(c)
for c in self.guests[:]:
self.uninvite(c)
self.server.games.remove(self)
|
|
# -*- coding: utf-8 -*-
"""
sphinx.websupport
~~~~~~~~~~~~~~~~~
Base Module for web support functions.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
import posixpath
from os import path
from six.moves import cPickle as pickle
from jinja2 import Environment, FileSystemLoader
from docutils.core import publish_parts
from sphinx.application import Sphinx
from sphinx.locale import _
from sphinx.util.osutil import ensuredir
from sphinx.util.jsonimpl import dumps as dump_json
from sphinx.util.pycompat import htmlescape
from sphinx.websupport import errors
from sphinx.websupport.search import BaseSearch, SEARCH_ADAPTERS
from sphinx.websupport.storage import StorageBackend
class WebSupport(object):
"""The main API class for the web support package. All interactions
with the web support package should occur through this class.
"""
def __init__(self,
srcdir=None, # only required for building
builddir='', # the dir with data/static/doctrees subdirs
datadir=None, # defaults to builddir/data
staticdir=None, # defaults to builddir/static
doctreedir=None, # defaults to builddir/doctrees
search=None, # defaults to no search
storage=None, # defaults to SQLite in datadir
status=sys.stdout,
warning=sys.stderr,
moderation_callback=None,
allow_anonymous_comments=True,
docroot='',
staticroot='static',
):
# directories
self.srcdir = srcdir
self.builddir = builddir
self.outdir = path.join(builddir, 'data')
self.datadir = datadir or self.outdir
self.staticdir = staticdir or path.join(self.builddir, 'static')
        self.doctreedir = doctreedir or path.join(self.builddir, 'doctrees')
# web server virtual paths
self.staticroot = staticroot.strip('/')
self.docroot = docroot.strip('/')
self.status = status
self.warning = warning
self.moderation_callback = moderation_callback
self.allow_anonymous_comments = allow_anonymous_comments
self._init_templating()
self._init_search(search)
self._init_storage(storage)
self._globalcontext = None
self._make_base_comment_options()
def _init_storage(self, storage):
if isinstance(storage, StorageBackend):
self.storage = storage
else:
# If a StorageBackend isn't provided, use the default
# SQLAlchemy backend.
from sphinx.websupport.storage.sqlalchemystorage \
import SQLAlchemyStorage
if not storage:
# no explicit DB path given; create default sqlite database
db_path = path.join(self.datadir, 'db', 'websupport.db')
ensuredir(path.dirname(db_path))
storage = 'sqlite:///' + db_path
self.storage = SQLAlchemyStorage(storage)
def _init_templating(self):
import sphinx
template_path = path.join(sphinx.package_dir,
'themes', 'basic')
loader = FileSystemLoader(template_path)
self.template_env = Environment(loader=loader)
def _init_search(self, search):
if isinstance(search, BaseSearch):
self.search = search
else:
mod, cls = SEARCH_ADAPTERS[search or 'null']
mod = 'sphinx.websupport.search.' + mod
SearchClass = getattr(__import__(mod, None, None, [cls]), cls)
search_path = path.join(self.datadir, 'search')
self.search = SearchClass(search_path)
self.results_template = \
self.template_env.get_template('searchresults.html')
def build(self):
"""Build the documentation. Places the data into the `outdir`
directory. Use it like this::
support = WebSupport(srcdir, builddir, search='xapian')
support.build()
        This will read reStructuredText files from `srcdir`. Then it will
build the pickles and search index, placing them into `builddir`.
It will also save node data to the database.
"""
if not self.srcdir:
raise RuntimeError('No srcdir associated with WebSupport object')
app = Sphinx(self.srcdir, self.srcdir, self.outdir, self.doctreedir,
'websupport', status=self.status, warning=self.warning)
app.builder.set_webinfo(self.staticdir, self.staticroot,
self.search, self.storage)
self.storage.pre_build()
app.build()
self.storage.post_build()
def get_globalcontext(self):
"""Load and return the "global context" pickle."""
if not self._globalcontext:
infilename = path.join(self.datadir, 'globalcontext.pickle')
with open(infilename, 'rb') as f:
self._globalcontext = pickle.load(f)
return self._globalcontext
def get_document(self, docname, username='', moderator=False):
"""Load and return a document from a pickle. The document will
be a dict object which can be used to render a template::
support = WebSupport(datadir=datadir)
support.get_document('index', username, moderator)
In most cases `docname` will be taken from the request path and
passed directly to this function. In Flask, that would be something
like this::
@app.route('/<path:docname>')
def index(docname):
username = g.user.name if g.user else ''
moderator = g.user.moderator if g.user else False
try:
document = support.get_document(docname, username,
moderator)
except DocumentNotFoundError:
abort(404)
render_template('doc.html', document=document)
The document dict that is returned contains the following items
to be used during template rendering.
* **body**: The main body of the document as HTML
* **sidebar**: The sidebar of the document as HTML
* **relbar**: A div containing links to related documents
* **title**: The title of the document
* **css**: Links to css files used by Sphinx
* **script**: Javascript containing comment options
This raises :class:`~sphinx.websupport.errors.DocumentNotFoundError`
if a document matching `docname` is not found.
:param docname: the name of the document to load.
"""
docpath = path.join(self.datadir, 'pickles', docname)
if path.isdir(docpath):
infilename = docpath + '/index.fpickle'
if not docname:
docname = 'index'
else:
docname += '/index'
else:
infilename = docpath + '.fpickle'
try:
with open(infilename, 'rb') as f:
document = pickle.load(f)
except IOError:
raise errors.DocumentNotFoundError(
'The document "%s" could not be found' % docname)
comment_opts = self._make_comment_options(username, moderator)
comment_meta = self._make_metadata(
self.storage.get_metadata(docname, moderator))
document['script'] = comment_opts + comment_meta + document['script']
return document
def get_search_results(self, q):
"""Perform a search for the query `q`, and create a set
of search results. Then render the search results as html and
return a context dict like the one created by
:meth:`get_document`::
document = support.get_search_results(q)
:param q: the search query
"""
results = self.search.query(q)
ctx = {
'q': q,
'search_performed': True,
'search_results': results,
'docroot': '../', # XXX
'_': _,
}
document = {
'body': self.results_template.render(ctx),
'title': 'Search Results',
'sidebar': '',
'relbar': ''
}
return document
def get_data(self, node_id, username=None, moderator=False):
"""Get the comments and source associated with `node_id`. If
`username` is given vote information will be included with the
returned comments. The default CommentBackend returns a dict with
two keys, *source*, and *comments*. *source* is raw source of the
node and is used as the starting point for proposals a user can
add. *comments* is a list of dicts that represent a comment, each
having the following items:
============= ======================================================
Key Contents
============= ======================================================
text The comment text.
username The username that was stored with the comment.
id The comment's unique identifier.
rating The comment's current rating.
age The time in seconds since the comment was added.
time A dict containing time information. It contains the
following keys: year, month, day, hour, minute, second,
iso, and delta. `iso` is the time formatted in ISO
8601 format. `delta` is a printable form of how old
the comment is (e.g. "3 hours ago").
vote If `user_id` was given, this will be an integer
representing the vote. 1 for an upvote, -1 for a
downvote, or 0 if unvoted.
node The id of the node that the comment is attached to.
If the comment's parent is another comment rather than
a node, this will be null.
parent The id of the comment that this comment is attached
to if it is not attached to a node.
children A list of all children, in this format.
        proposal_diff An HTML representation of the differences between the
                      current source and the user's proposed source.
============= ======================================================
:param node_id: the id of the node to get comments for.
:param username: the username of the user viewing the comments.
:param moderator: whether the user is a moderator.
"""
return self.storage.get_data(node_id, username, moderator)
def delete_comment(self, comment_id, username='', moderator=False):
"""Delete a comment.
If `moderator` is True, the comment and all descendants will be deleted
from the database, and the function returns ``True``.
If `moderator` is False, the comment will be marked as deleted (but not
removed from the database so as not to leave any comments orphaned), but
only if the `username` matches the `username` on the comment. The
        username and text fields are replaced with "[deleted]". In this case,
the function returns ``False``.
This raises :class:`~sphinx.websupport.errors.UserNotAuthorizedError`
if moderator is False and `username` doesn't match username on the
comment.
:param comment_id: the id of the comment to delete.
:param username: the username requesting the deletion.
:param moderator: whether the requestor is a moderator.
"""
return self.storage.delete_comment(comment_id, username, moderator)
def add_comment(self, text, node_id='', parent_id='', displayed=True,
username=None, time=None, proposal=None,
moderator=False):
"""Add a comment to a node or another comment. Returns the comment
in the same format as :meth:`get_comments`. If the comment is being
attached to a node, pass in the node's id (as a string) with the
node keyword argument::
comment = support.add_comment(text, node_id=node_id)
If the comment is the child of another comment, provide the parent's
id (as a string) with the parent keyword argument::
comment = support.add_comment(text, parent_id=parent_id)
If you would like to store a username with the comment, pass
in the optional `username` keyword argument::
comment = support.add_comment(text, node=node_id,
username=username)
:param parent_id: the prefixed id of the comment's parent.
:param text: the text of the comment.
:param displayed: for moderation purposes
:param username: the username of the user making the comment.
:param time: the time the comment was created, defaults to now.
"""
if username is None:
if self.allow_anonymous_comments:
username = 'Anonymous'
else:
raise errors.UserNotAuthorizedError()
parsed = self._parse_comment_text(text)
comment = self.storage.add_comment(parsed, displayed, username,
time, proposal, node_id,
parent_id, moderator)
comment['original_text'] = text
if not displayed and self.moderation_callback:
self.moderation_callback(comment)
return comment
def process_vote(self, comment_id, username, value):
"""Process a user's vote. The web support package relies
on the API user to perform authentication. The API user will
typically receive a comment_id and value from a form, and then
make sure the user is authenticated. A unique username must be
passed in, which will also be used to retrieve the user's past
voting data. An example, once again in Flask::
@app.route('/docs/process_vote', methods=['POST'])
def process_vote():
if g.user is None:
abort(401)
comment_id = request.form.get('comment_id')
value = request.form.get('value')
if value is None or comment_id is None:
abort(400)
support.process_vote(comment_id, g.user.name, value)
return "success"
:param comment_id: the comment being voted on
:param username: the unique username of the user voting
:param value: 1 for an upvote, -1 for a downvote, 0 for an unvote.
"""
value = int(value)
if not -1 <= value <= 1:
raise ValueError('vote value %s out of range (-1, 1)' % value)
self.storage.process_vote(comment_id, username, value)
def update_username(self, old_username, new_username):
"""To remain decoupled from a webapp's authentication system, the
web support package stores a user's username with each of their
comments and votes. If the authentication system allows a user to
        change their username, this can lead to stale data in the web
support system. To avoid this, each time a username is changed, this
method should be called.
:param old_username: The original username.
:param new_username: The new username.
"""
self.storage.update_username(old_username, new_username)
def accept_comment(self, comment_id, moderator=False):
"""Accept a comment that is pending moderation.
This raises :class:`~sphinx.websupport.errors.UserNotAuthorizedError`
if moderator is False.
:param comment_id: The id of the comment that was accepted.
:param moderator: Whether the user making the request is a moderator.
"""
if not moderator:
raise errors.UserNotAuthorizedError()
self.storage.accept_comment(comment_id)
def _make_base_comment_options(self):
"""Helper method to create the part of the COMMENT_OPTIONS javascript
that remains the same throughout the lifetime of the
:class:`~sphinx.websupport.WebSupport` object.
"""
self.base_comment_opts = {}
if self.docroot != '':
comment_urls = [
('addCommentURL', '_add_comment'),
('getCommentsURL', '_get_comments'),
('processVoteURL', '_process_vote'),
('acceptCommentURL', '_accept_comment'),
('deleteCommentURL', '_delete_comment')
]
for key, value in comment_urls:
self.base_comment_opts[key] = \
'/' + posixpath.join(self.docroot, value)
if self.staticroot != 'static':
static_urls = [
('commentImage', 'comment.png'),
('closeCommentImage', 'comment-close.png'),
('loadingImage', 'ajax-loader.gif'),
('commentBrightImage', 'comment-bright.png'),
('upArrow', 'up.png'),
('upArrowPressed', 'up-pressed.png'),
('downArrow', 'down.png'),
('downArrowPressed', 'down-pressed.png')
]
for key, value in static_urls:
self.base_comment_opts[key] = \
'/' + posixpath.join(self.staticroot, '_static', value)
def _make_comment_options(self, username, moderator):
"""Helper method to create the parts of the COMMENT_OPTIONS
javascript that are unique to each request.
:param username: The username of the user making the request.
:param moderator: Whether the user making the request is a moderator.
"""
rv = self.base_comment_opts.copy()
if username:
rv.update({
'voting': True,
'username': username,
'moderator': moderator,
})
return '''\
<script type="text/javascript">
var COMMENT_OPTIONS = %s;
</script>
''' % dump_json(rv)
def _make_metadata(self, data):
return '''\
<script type="text/javascript">
var COMMENT_METADATA = %s;
</script>
''' % dump_json(data)
def _parse_comment_text(self, text):
settings = {'file_insertion_enabled': False,
'raw_enabled': False,
'output_encoding': 'unicode'}
try:
ret = publish_parts(text, writer_name='html',
settings_overrides=settings)['fragment']
except Exception:
ret = htmlescape(text)
return ret
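# Hedged usage sketch (not part of the original module), following the calls
# shown in the docstrings above; the paths and the 'xapian' search adapter
# are illustrative placeholders.
def _example_websupport_round_trip():
    # Build step, typically run once whenever the sources change.
    support = WebSupport(srcdir='/path/to/rst', builddir='/path/to/build',
                         search='xapian')
    support.build()
    # Serving step, typically in the web application process.
    support = WebSupport(datadir='/path/to/build/data',
                         staticdir='/path/to/build/static')
    document = support.get_document('index', username='guest',
                                    moderator=False)
    return document['body']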
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions."""
import contextlib
import datetime
import hashlib
import inspect
import os
import pyclbr
import re
import shutil
import stat
import sys
import tempfile
from xml.dom import minidom
from xml.parsers import expat
from xml import sax
from xml.sax import expatreader
from xml.sax import saxutils
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import importutils
from oslo_utils import timeutils
import retrying
import six
from cinder.brick.initiator import connector
from cinder import exception
from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
synchronized = lockutils.synchronized_with_prefix('cinder-')
def find_config(config_path):
"""Find a configuration file using the given hint.
:param config_path: Full or relative path to the config.
:returns: Full path of the config, if it exists.
:raises: `cinder.exception.ConfigNotFound`
"""
possible_locations = [
config_path,
os.path.join(CONF.state_path, "etc", "cinder", config_path),
os.path.join(CONF.state_path, "etc", config_path),
os.path.join(CONF.state_path, config_path),
"/etc/cinder/%s" % config_path,
]
for path in possible_locations:
if os.path.exists(path):
return os.path.abspath(path)
raise exception.ConfigNotFound(path=os.path.abspath(config_path))
def as_int(obj, quiet=True):
# Try "2" -> 2
try:
return int(obj)
except (ValueError, TypeError):
pass
# Try "2.5" -> 2
try:
return int(float(obj))
except (ValueError, TypeError):
pass
# Eck, not sure what this is then.
if not quiet:
raise TypeError(_("Can not translate %s to integer.") % (obj))
return obj
def is_int_like(val):
"""Check if a value looks like an int."""
try:
return str(int(val)) == str(val)
except Exception:
return False
def check_exclusive_options(**kwargs):
"""Checks that only one of the provided options is actually not-none.
    Iterates over all the kwargs passed in and checks that at most one of
    them is not None; if more than one is not None, an exception is raised
    naming the offending arguments.
"""
if not kwargs:
return
pretty_keys = kwargs.pop("pretty_keys", True)
exclusive_options = {}
for (k, v) in kwargs.iteritems():
if v is not None:
exclusive_options[k] = True
if len(exclusive_options) > 1:
# Change the format of the names from pythonic to
# something that is more readable.
#
# Ex: 'the_key' -> 'the key'
if pretty_keys:
names = [k.replace('_', ' ') for k in kwargs.keys()]
else:
names = kwargs.keys()
names = ", ".join(sorted(names))
msg = (_("May specify only one of %s") % (names))
raise exception.InvalidInput(reason=msg)
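# Hedged example (not part of the original module): the keyword names are
# illustrative; passing two non-None options raises InvalidInput.
def _example_exclusive_options():
    check_exclusive_options(snapshot_id=None, source_volid='vol-1')  # passes
    try:
        check_exclusive_options(snapshot_id='snap-1', source_volid='vol-1')
    except exception.InvalidInput:
        pass  # two options were set, so the check complains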
def execute(*cmd, **kwargs):
"""Convenience wrapper around oslo's execute() method."""
if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
kwargs['root_helper'] = get_root_helper()
return processutils.execute(*cmd, **kwargs)
def check_ssh_injection(cmd_list):
ssh_injection_pattern = ['`', '$', '|', '||', ';', '&', '&&', '>', '>>',
'<']
# Check whether injection attacks exist
for arg in cmd_list:
arg = arg.strip()
# Check for matching quotes on the ends
is_quoted = re.match('^(?P<quote>[\'"])(?P<quoted>.*)(?P=quote)$', arg)
if is_quoted:
# Check for unescaped quotes within the quoted argument
quoted = is_quoted.group('quoted')
if quoted:
if (re.match('[\'"]', quoted) or
re.search('[^\\\\][\'"]', quoted)):
raise exception.SSHInjectionThreat(command=cmd_list)
else:
# We only allow spaces within quoted arguments, and that
# is the only special character allowed within quotes
if len(arg.split()) > 1:
raise exception.SSHInjectionThreat(command=cmd_list)
        # Second, check for dangerous characters in the argument; a shell
        # special character is only allowed when escaped with a backslash.
for c in ssh_injection_pattern:
if c not in arg:
continue
result = arg.find(c)
if not result == -1:
if result == 0 or not arg[result - 1] == '\\':
raise exception.SSHInjectionThreat(command=cmd_list)
def create_channel(client, width, height):
"""Invoke an interactive shell session on server."""
channel = client.invoke_shell()
channel.resize_pty(width, height)
return channel
def cinderdir():
import cinder
return os.path.abspath(cinder.__file__).split('cinder/__init__.py')[0]
def last_completed_audit_period(unit=None):
"""This method gives you the most recently *completed* audit period.
arguments:
units: string, one of 'hour', 'day', 'month', 'year'
Periods normally begin at the beginning (UTC) of the
period unit (So a 'day' period begins at midnight UTC,
a 'month' unit on the 1st, a 'year' on Jan, 1)
unit string may be appended with an optional offset
like so: 'day@18' This will begin the period at 18:00
UTC. 'month@15' starts a monthly period on the 15th,
and year@3 begins a yearly one on March 1st.
returns: 2 tuple of datetimes (begin, end)
The begin timestamp of this audit period is the same as the
end of the previous.
"""
if not unit:
unit = CONF.volume_usage_audit_period
offset = 0
if '@' in unit:
unit, offset = unit.split("@", 1)
offset = int(offset)
rightnow = timeutils.utcnow()
if unit not in ('month', 'day', 'year', 'hour'):
raise ValueError('Time period must be hour, day, month or year')
if unit == 'month':
if offset == 0:
offset = 1
end = datetime.datetime(day=offset,
month=rightnow.month,
year=rightnow.year)
if end >= rightnow:
year = rightnow.year
if 1 >= rightnow.month:
year -= 1
month = 12 + (rightnow.month - 1)
else:
month = rightnow.month - 1
end = datetime.datetime(day=offset,
month=month,
year=year)
year = end.year
if 1 >= end.month:
year -= 1
month = 12 + (end.month - 1)
else:
month = end.month - 1
begin = datetime.datetime(day=offset, month=month, year=year)
elif unit == 'year':
if offset == 0:
offset = 1
end = datetime.datetime(day=1, month=offset, year=rightnow.year)
if end >= rightnow:
end = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 1)
begin = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 2)
else:
begin = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 1)
elif unit == 'day':
end = datetime.datetime(hour=offset,
day=rightnow.day,
month=rightnow.month,
year=rightnow.year)
if end >= rightnow:
end = end - datetime.timedelta(days=1)
begin = end - datetime.timedelta(days=1)
elif unit == 'hour':
end = rightnow.replace(minute=offset, second=0, microsecond=0)
if end >= rightnow:
end = end - datetime.timedelta(hours=1)
begin = end - datetime.timedelta(hours=1)
return (begin, end)
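# Hedged example (not part of the original module): the most recently
# completed daily period that rolls over at 18:00 UTC.
def _example_audit_period():
    begin, end = last_completed_audit_period('day@18')
    # begin and end are datetimes exactly one day apart; end is the most
    # recent 18:00 UTC that already lies in the past.
    return begin, end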
class ProtectedExpatParser(expatreader.ExpatParser):
"""An expat parser which disables DTD's and entities by default."""
def __init__(self, forbid_dtd=True, forbid_entities=True,
*args, **kwargs):
# Python 2.x old style class
expatreader.ExpatParser.__init__(self, *args, **kwargs)
self.forbid_dtd = forbid_dtd
self.forbid_entities = forbid_entities
def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
raise ValueError("Inline DTD forbidden")
def entity_decl(self, entityName, is_parameter_entity, value, base,
systemId, publicId, notationName):
raise ValueError("<!ENTITY> forbidden")
def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
# expat 1.2
raise ValueError("<!ENTITY> forbidden")
def reset(self):
expatreader.ExpatParser.reset(self)
if self.forbid_dtd:
self._parser.StartDoctypeDeclHandler = self.start_doctype_decl
if self.forbid_entities:
self._parser.EntityDeclHandler = self.entity_decl
self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
def safe_minidom_parse_string(xml_string):
"""Parse an XML string using minidom safely.
"""
try:
return minidom.parseString(xml_string, parser=ProtectedExpatParser())
except sax.SAXParseException:
raise expat.ExpatError()
def xhtml_escape(value):
"""Escapes a string so it is valid within XML or XHTML.
"""
    return saxutils.escape(value, {'"': '&quot;', "'": '&apos;'})
def get_from_path(items, path):
"""Returns a list of items matching the specified path.
Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item
in items, looks up items[prop1][prop2][prop3]. Like XPath, if any of the
intermediate results are lists it will treat each list item individually.
A 'None' in items or any child expressions will be ignored, this function
will not throw because of None (anywhere) in items. The returned list
will contain no None values.
"""
if path is None:
raise exception.Error('Invalid mini_xpath')
(first_token, sep, remainder) = path.partition('/')
if first_token == '':
raise exception.Error('Invalid mini_xpath')
results = []
if items is None:
return results
if not isinstance(items, list):
# Wrap single objects in a list
items = [items]
for item in items:
if item is None:
continue
get_method = getattr(item, 'get', None)
if get_method is None:
continue
child = get_method(first_token)
if child is None:
continue
if isinstance(child, list):
# Flatten intermediate lists
for x in child:
results.append(x)
else:
results.append(child)
if not sep:
# No more tokens
return results
else:
return get_from_path(results, remainder)
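# Hedged example (not part of the original module): XPath-like traversal of
# nested dicts and lists, as described in the docstring above.
def _example_get_from_path():
    items = [{'a': {'b': [{'c': 1}, {'c': 2}]}},
             {'a': {'b': {'c': 3}}},
             {'a': None}]
    return get_from_path(items, 'a/b/c')  # -> [1, 2, 3]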
def is_valid_boolstr(val):
"""Check if the provided string is a valid bool string or not."""
val = str(val).lower()
return (val == 'true' or val == 'false' or
val == 'yes' or val == 'no' or
val == 'y' or val == 'n' or
val == '1' or val == '0')
def is_none_string(val):
"""Check if a string represents a None value."""
if not isinstance(val, six.string_types):
return False
return val.lower() == 'none'
def monkey_patch():
"""If the CONF.monkey_patch set as True,
this function patches a decorator
for all functions in specified modules.
You can set decorators for each modules
using CONF.monkey_patch_modules.
The format is "Module path:Decorator function".
Example: 'cinder.api.ec2.cloud:' \
cinder.openstack.common.notifier.api.notify_decorator'
Parameters of the decorator is as follows.
(See cinder.openstack.common.notifier.api.notify_decorator)
name - name of the function
function - object of the function
"""
    # If CONF.monkey_patch is not True, this function does nothing.
if not CONF.monkey_patch:
return
# Get list of modules and decorators
for module_and_decorator in CONF.monkey_patch_modules:
module, decorator_name = module_and_decorator.split(':')
# import decorator function
decorator = importutils.import_class(decorator_name)
__import__(module)
# Retrieve module information using pyclbr
module_data = pyclbr.readmodule_ex(module)
for key in module_data.keys():
# set the decorator for the class methods
if isinstance(module_data[key], pyclbr.Class):
clz = importutils.import_class("%s.%s" % (module, key))
for method, func in inspect.getmembers(clz, inspect.ismethod):
setattr(
clz, method,
decorator("%s.%s.%s" % (module, key, method), func))
# set the decorator for the function
if isinstance(module_data[key], pyclbr.Function):
func = importutils.import_class("%s.%s" % (module, key))
setattr(sys.modules[module], key,
decorator("%s.%s" % (module, key), func))
def generate_glance_url():
"""Generate the URL to glance."""
# TODO(jk0): This will eventually need to take SSL into consideration
# when supported in glance.
return "http://%s:%d" % (CONF.glance_host, CONF.glance_port)
def make_dev_path(dev, partition=None, base='/dev'):
"""Return a path to a particular device.
>>> make_dev_path('xvdc')
/dev/xvdc
>>> make_dev_path('xvdc', 1)
/dev/xvdc1
"""
path = os.path.join(base, dev)
if partition:
path += str(partition)
return path
def sanitize_hostname(hostname):
"""Return a hostname which conforms to RFC-952 and RFC-1123 specs."""
if isinstance(hostname, unicode):
hostname = hostname.encode('latin-1', 'ignore')
hostname = re.sub('[ _]', '-', hostname)
hostname = re.sub('[^\w.-]+', '', hostname)
hostname = hostname.lower()
hostname = hostname.strip('.-')
return hostname
def hash_file(file_like_object):
"""Generate a hash for the contents of a file."""
checksum = hashlib.sha1()
any(map(checksum.update, iter(lambda: file_like_object.read(32768), '')))
return checksum.hexdigest()
def service_is_up(service):
"""Check whether a service is up based on last heartbeat."""
last_heartbeat = service['updated_at'] or service['created_at']
# Timestamps in DB are UTC.
elapsed = (timeutils.utcnow() - last_heartbeat).total_seconds()
return abs(elapsed) <= CONF.service_down_time
def read_file_as_root(file_path):
"""Secure helper to read file as root."""
try:
out, _err = execute('cat', file_path, run_as_root=True)
return out
except processutils.ProcessExecutionError:
raise exception.FileNotFound(file_path=file_path)
@contextlib.contextmanager
def temporary_chown(path, owner_uid=None):
"""Temporarily chown a path.
:params owner_uid: UID of temporary owner (defaults to current user)
"""
if owner_uid is None:
owner_uid = os.getuid()
orig_uid = os.stat(path).st_uid
if orig_uid != owner_uid:
execute('chown', owner_uid, path, run_as_root=True)
try:
yield
finally:
if orig_uid != owner_uid:
execute('chown', orig_uid, path, run_as_root=True)
@contextlib.contextmanager
def tempdir(**kwargs):
tmpdir = tempfile.mkdtemp(**kwargs)
try:
yield tmpdir
finally:
try:
shutil.rmtree(tmpdir)
except OSError as e:
LOG.debug('Could not remove tmpdir: %s', e)
def walk_class_hierarchy(clazz, encountered=None):
"""Walk class hierarchy, yielding most derived classes first."""
if not encountered:
encountered = []
for subclass in clazz.__subclasses__():
if subclass not in encountered:
encountered.append(subclass)
# drill down to leaves first
for subsubclass in walk_class_hierarchy(subclass, encountered):
yield subsubclass
yield subclass
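# Hedged example (not part of the original module): the most derived classes
# come out before their parents, and the root class itself is not yielded.
def _example_walk_class_hierarchy():
    class Base(object):
        pass

    class Middle(Base):
        pass

    class Leaf(Middle):
        pass

    return list(walk_class_hierarchy(Base))  # [Leaf, Middle]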
def get_root_helper():
return 'sudo cinder-rootwrap %s' % CONF.rootwrap_config
def brick_get_connector_properties(multipath=False, enforce_multipath=False):
"""wrapper for the brick calls to automatically set
the root_helper needed for cinder.
:param multipath: A boolean indicating whether the connector can
support multipath.
:param enforce_multipath: If True, it raises exception when multipath=True
is specified but multipathd is not running.
If False, it falls back to multipath=False
when multipathd is not running.
"""
root_helper = get_root_helper()
return connector.get_connector_properties(root_helper,
CONF.my_ip,
multipath,
enforce_multipath)
def brick_get_connector(protocol, driver=None,
execute=processutils.execute,
use_multipath=False,
device_scan_attempts=3,
*args, **kwargs):
"""Wrapper to get a brick connector object.
This automatically populates the required protocol as well
as the root_helper needed to execute commands.
"""
root_helper = get_root_helper()
return connector.InitiatorConnector.factory(protocol, root_helper,
driver=driver,
execute=execute,
use_multipath=use_multipath,
device_scan_attempts=
device_scan_attempts,
*args, **kwargs)
def require_driver_initialized(driver):
"""Verifies if `driver` is initialized
If the driver is not initialized, an exception will be raised.
:params driver: The driver instance.
:raises: `exception.DriverNotInitialized`
"""
# we can't do anything if the driver didn't init
if not driver.initialized:
driver_name = driver.__class__.__name__
LOG.error(_LE("Volume driver %s not initialized") % driver_name)
raise exception.DriverNotInitialized()
def get_file_mode(path):
"""This primarily exists to make unit testing easier."""
return stat.S_IMODE(os.stat(path).st_mode)
def get_file_gid(path):
"""This primarily exists to make unit testing easier."""
return os.stat(path).st_gid
def get_file_size(path):
"""Returns the file size."""
return os.stat(path).st_size
def _get_disk_of_partition(devpath, st=None):
"""Returns a disk device path from a partition device path, and stat for
the device. If devpath is not a partition, devpath is returned as it is.
For example, '/dev/sda' is returned for '/dev/sda1', and '/dev/disk1' is
for '/dev/disk1p1' ('p' is prepended to the partition number if the disk
name ends with numbers).
"""
diskpath = re.sub('(?:(?<=\d)p)?\d+$', '', devpath)
if diskpath != devpath:
try:
st_disk = os.stat(diskpath)
if stat.S_ISBLK(st_disk.st_mode):
return (diskpath, st_disk)
except OSError:
pass
# devpath is not a partition
if st is None:
st = os.stat(devpath)
return (devpath, st)
def get_blkdev_major_minor(path, lookup_for_file=True):
"""Get the device's "major:minor" number of a block device to control
I/O ratelimit of the specified path.
If lookup_for_file is True and the path is a regular file, lookup a disk
device which the file lies on and returns the result for the device.
"""
st = os.stat(path)
if stat.S_ISBLK(st.st_mode):
path, st = _get_disk_of_partition(path, st)
return '%d:%d' % (os.major(st.st_rdev), os.minor(st.st_rdev))
elif stat.S_ISCHR(st.st_mode):
# No I/O ratelimit control is provided for character devices
return None
elif lookup_for_file:
# lookup the mounted disk which the file lies on
out, _err = execute('df', path)
devpath = out.split("\n")[1].split()[0]
        if devpath[0] != '/':
# the file is on a network file system
return None
return get_blkdev_major_minor(devpath, False)
else:
msg = _("Unable to get a block device for file \'%s\'") % path
raise exception.Error(msg)
def check_string_length(value, name, min_length=0, max_length=None):
"""Check the length of specified string
:param value: the value of the string
:param name: the name of the string
:param min_length: the min_length of the string
:param max_length: the max_length of the string
"""
if not isinstance(value, six.string_types):
msg = _("%s is not a string or unicode") % name
raise exception.InvalidInput(message=msg)
if len(value) < min_length:
msg = _("%(name)s has a minimum character requirement of "
"%(min_length)s.") % {'name': name, 'min_length': min_length}
raise exception.InvalidInput(message=msg)
if max_length and len(value) > max_length:
msg = _("%(name)s has more than %(max_length)s "
"characters.") % {'name': name, 'max_length': max_length}
raise exception.InvalidInput(message=msg)
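# Illustrative checks (a sketch, not from the original source):
#
#   check_string_length('volume-01', 'name', min_length=1, max_length=255)
#       -> returns None (valid)
#   check_string_length('', 'name', min_length=1)
#       -> raises exception.InvalidInput
#   check_string_length(123, 'name')
#       -> raises exception.InvalidInput (not a string)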
_visible_admin_metadata_keys = ['readonly', 'attached_mode']
def add_visible_admin_metadata(volume):
"""Add user-visible admin metadata to regular metadata.
Extracts the admin metadata keys that are to be made visible to
non-administrators, and adds them to the regular metadata structure for the
passed-in volume.
"""
visible_admin_meta = {}
if volume.get('volume_admin_metadata'):
for item in volume['volume_admin_metadata']:
if item['key'] in _visible_admin_metadata_keys:
visible_admin_meta[item['key']] = item['value']
# avoid circular ref when volume is a Volume instance
elif (volume.get('admin_metadata') and
isinstance(volume.get('admin_metadata'), dict)):
for key in _visible_admin_metadata_keys:
if key in volume['admin_metadata'].keys():
visible_admin_meta[key] = volume['admin_metadata'][key]
if not visible_admin_meta:
return
# NOTE(zhiyan): update visible administration metadata to
# volume metadata, administration metadata will rewrite existing key.
if volume.get('volume_metadata'):
orig_meta = list(volume.get('volume_metadata'))
for item in orig_meta:
if item['key'] in visible_admin_meta.keys():
item['value'] = visible_admin_meta.pop(item['key'])
for key, value in visible_admin_meta.iteritems():
orig_meta.append({'key': key, 'value': value})
volume['volume_metadata'] = orig_meta
# avoid circular ref when vol is a Volume instance
elif (volume.get('metadata') and
isinstance(volume.get('metadata'), dict)):
volume['metadata'].update(visible_admin_meta)
else:
volume['metadata'] = visible_admin_meta
def remove_invalid_filter_options(context, filters,
allowed_search_options):
"""Remove search options that are not valid
for non-admin API/context.
"""
if context.is_admin:
# Allow all options
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in filters
if opt not in allowed_search_options]
bad_options = ", ".join(unknown_options)
log_msg = "Removing options '%s' from query." % bad_options
LOG.debug(log_msg)
for opt in unknown_options:
del filters[opt]
def is_blk_device(dev):
try:
if stat.S_ISBLK(os.stat(dev).st_mode):
return True
return False
except Exception:
LOG.debug('Path %s not found in is_blk_device check' % dev)
return False
def retry(exceptions, interval=1, retries=3, backoff_rate=2):
def _retry_on_exception(e):
return isinstance(e, exceptions)
def _backoff_sleep(previous_attempt_number, delay_since_first_attempt_ms):
exp = backoff_rate ** previous_attempt_number
wait_for = max(0, interval * exp)
LOG.debug("Sleeping for %s seconds", wait_for)
return wait_for * 1000.0
def _print_stop(previous_attempt_number, delay_since_first_attempt_ms):
delay_since_first_attempt = delay_since_first_attempt_ms / 1000.0
LOG.debug("Failed attempt %s", previous_attempt_number)
LOG.debug("Have been at this for %s seconds",
delay_since_first_attempt)
return previous_attempt_number == retries
if retries < 1:
raise ValueError('Retries must be greater than or '
'equal to 1 (received: %s). ' % retries)
def _decorator(f):
@six.wraps(f)
def _wrapper(*args, **kwargs):
r = retrying.Retrying(retry_on_exception=_retry_on_exception,
wait_func=_backoff_sleep,
stop_func=_print_stop)
return r.call(f, *args, **kwargs)
return _wrapper
return _decorator
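# A minimal usage sketch of the retry decorator above; the exception type,
# timings and the decorated function are illustrative assumptions only:
#
#   @retry(exceptions=(processutils.ProcessExecutionError,),
#          interval=2, retries=5, backoff_rate=2)
#   def _delete_device(path):
#       execute('rm', '-f', path)
#
# A failed attempt number n sleeps interval * backoff_rate ** n seconds before
# the next try, and the call gives up after `retries` attempts.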
def convert_version_to_int(version):
try:
if isinstance(version, six.string_types):
version = convert_version_to_tuple(version)
if isinstance(version, tuple):
return reduce(lambda x, y: (x * 1000) + y, version)
except Exception:
msg = _("Version %s is invalid.") % version
raise exception.CinderException(msg)
def convert_version_to_str(version_int):
version_numbers = []
factor = 1000
while version_int != 0:
version_number = version_int - (version_int // factor * factor)
version_numbers.insert(0, six.text_type(version_number))
        version_int = version_int // factor
return reduce(lambda x, y: "%s.%s" % (x, y), version_numbers)
def convert_version_to_tuple(version_str):
return tuple(int(part) for part in version_str.split('.'))
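# Illustrative round trip for the version helpers above (a sketch, not from the
# original source). Each dot-separated field is packed into three decimal
# digits, so components >= 1000 would not round-trip:
#
#   convert_version_to_tuple('1.2.3')  -> (1, 2, 3)
#   convert_version_to_int('1.2.3')    -> 1002003
#   convert_version_to_str(1002003)    -> '1.2.3'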
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class DependentHostedNumberOrderList(ListResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, signing_document_sid):
"""
Initialize the DependentHostedNumberOrderList
:param Version version: Version that contains the resource
:param signing_document_sid: LOA document sid.
:returns: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderList
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderList
"""
super(DependentHostedNumberOrderList, self).__init__(version)
# Path Solution
self._solution = {'signing_document_sid': signing_document_sid, }
self._uri = '/AuthorizationDocuments/{signing_document_sid}/DependentHostedNumberOrders'.format(**self._solution)
def stream(self, status=values.unset, phone_number=values.unset,
incoming_phone_number_sid=values.unset, friendly_name=values.unset,
unique_name=values.unset, limit=None, page_size=None):
"""
Streams DependentHostedNumberOrderInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param DependentHostedNumberOrderInstance.Status status: The Status of this HostedNumberOrder.
:param unicode phone_number: An E164 formatted phone number.
:param unicode incoming_phone_number_sid: IncomingPhoneNumber sid.
:param unicode friendly_name: A human readable description of this resource.
:param unicode unique_name: A unique, developer assigned name of this HostedNumberOrder.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
status=status,
phone_number=phone_number,
incoming_phone_number_sid=incoming_phone_number_sid,
friendly_name=friendly_name,
unique_name=unique_name,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'])
def list(self, status=values.unset, phone_number=values.unset,
incoming_phone_number_sid=values.unset, friendly_name=values.unset,
unique_name=values.unset, limit=None, page_size=None):
"""
Lists DependentHostedNumberOrderInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param DependentHostedNumberOrderInstance.Status status: The Status of this HostedNumberOrder.
:param unicode phone_number: An E164 formatted phone number.
:param unicode incoming_phone_number_sid: IncomingPhoneNumber sid.
:param unicode friendly_name: A human readable description of this resource.
:param unicode unique_name: A unique, developer assigned name of this HostedNumberOrder.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
        :returns: list that will contain up to limit results
:rtype: list[twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderInstance]
"""
return list(self.stream(
status=status,
phone_number=phone_number,
incoming_phone_number_sid=incoming_phone_number_sid,
friendly_name=friendly_name,
unique_name=unique_name,
limit=limit,
page_size=page_size,
))
def page(self, status=values.unset, phone_number=values.unset,
incoming_phone_number_sid=values.unset, friendly_name=values.unset,
unique_name=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of DependentHostedNumberOrderInstance records from the API.
Request is executed immediately
:param DependentHostedNumberOrderInstance.Status status: The Status of this HostedNumberOrder.
:param unicode phone_number: An E164 formatted phone number.
:param unicode incoming_phone_number_sid: IncomingPhoneNumber sid.
:param unicode friendly_name: A human readable description of this resource.
:param unicode unique_name: A unique, developer assigned name of this HostedNumberOrder.
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of DependentHostedNumberOrderInstance
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderPage
"""
data = values.of({
'Status': status,
'PhoneNumber': phone_number,
'IncomingPhoneNumberSid': incoming_phone_number_sid,
'FriendlyName': friendly_name,
'UniqueName': unique_name,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(method='GET', uri=self._uri, params=data, )
return DependentHostedNumberOrderPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of DependentHostedNumberOrderInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of DependentHostedNumberOrderInstance
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return DependentHostedNumberOrderPage(self._version, response, self._solution)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.HostedNumbers.DependentHostedNumberOrderList>'
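# A usage sketch (not part of the generated bindings): assuming a configured
# twilio Client and an existing AuthorizationDocument sid (the 'PX...' value
# below is a placeholder), this list resource is typically reached like this:
#
#   from twilio.rest import Client
#
#   client = Client(account_sid, auth_token)
#   orders = client.preview.hosted_numbers \
#       .authorization_documents('PXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
#       .dependent_hosted_number_orders \
#       .list(status='pending-verification', limit=20)
#   for order in orders:
#       print(order.phone_number, order.status)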
class DependentHostedNumberOrderPage(Page):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, response, solution):
"""
Initialize the DependentHostedNumberOrderPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param signing_document_sid: LOA document sid.
:returns: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderPage
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderPage
"""
super(DependentHostedNumberOrderPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of DependentHostedNumberOrderInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderInstance
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderInstance
"""
return DependentHostedNumberOrderInstance(
self._version,
payload,
signing_document_sid=self._solution['signing_document_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.HostedNumbers.DependentHostedNumberOrderPage>'
class DependentHostedNumberOrderInstance(InstanceResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
class Status(object):
RECEIVED = "received"
PENDING_VERIFICATION = "pending-verification"
VERIFIED = "verified"
PENDING_LOA = "pending-loa"
CARRIER_PROCESSING = "carrier-processing"
TESTING = "testing"
COMPLETED = "completed"
FAILED = "failed"
ACTION_REQUIRED = "action-required"
class VerificationType(object):
PHONE_CALL = "phone-call"
PHONE_BILL = "phone-bill"
def __init__(self, version, payload, signing_document_sid):
"""
Initialize the DependentHostedNumberOrderInstance
:returns: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderInstance
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderInstance
"""
super(DependentHostedNumberOrderInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload.get('sid'),
'account_sid': payload.get('account_sid'),
'incoming_phone_number_sid': payload.get('incoming_phone_number_sid'),
'address_sid': payload.get('address_sid'),
'signing_document_sid': payload.get('signing_document_sid'),
'phone_number': payload.get('phone_number'),
'capabilities': payload.get('capabilities'),
'friendly_name': payload.get('friendly_name'),
'unique_name': payload.get('unique_name'),
'status': payload.get('status'),
'failure_reason': payload.get('failure_reason'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'verification_attempts': deserialize.integer(payload.get('verification_attempts')),
'email': payload.get('email'),
'cc_emails': payload.get('cc_emails'),
'verification_type': payload.get('verification_type'),
'verification_document_sid': payload.get('verification_document_sid'),
'extension': payload.get('extension'),
'call_delay': deserialize.integer(payload.get('call_delay')),
'verification_code': payload.get('verification_code'),
'verification_call_sids': payload.get('verification_call_sids'),
}
# Context
self._context = None
self._solution = {'signing_document_sid': signing_document_sid, }
@property
def sid(self):
"""
:returns: HostedNumberOrder sid.
:rtype: unicode
"""
return self._properties['sid']
@property
def account_sid(self):
"""
:returns: Account sid.
:rtype: unicode
"""
return self._properties['account_sid']
@property
def incoming_phone_number_sid(self):
"""
:returns: IncomingPhoneNumber sid.
:rtype: unicode
"""
return self._properties['incoming_phone_number_sid']
@property
def address_sid(self):
"""
:returns: Address sid.
:rtype: unicode
"""
return self._properties['address_sid']
@property
def signing_document_sid(self):
"""
:returns: LOA document sid.
:rtype: unicode
"""
return self._properties['signing_document_sid']
@property
def phone_number(self):
"""
:returns: An E164 formatted phone number.
:rtype: unicode
"""
return self._properties['phone_number']
@property
def capabilities(self):
"""
:returns: A mapping of phone number capabilities.
:rtype: unicode
"""
return self._properties['capabilities']
@property
def friendly_name(self):
"""
:returns: A human readable description of this resource.
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def unique_name(self):
"""
:returns: A unique, developer assigned name of this HostedNumberOrder.
:rtype: unicode
"""
return self._properties['unique_name']
@property
def status(self):
"""
:returns: The Status of this HostedNumberOrder.
:rtype: DependentHostedNumberOrderInstance.Status
"""
return self._properties['status']
@property
def failure_reason(self):
"""
:returns: Why a hosted_number_order reached status "action-required"
:rtype: unicode
"""
return self._properties['failure_reason']
@property
def date_created(self):
"""
:returns: The date this HostedNumberOrder was created.
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date this HostedNumberOrder was updated.
:rtype: datetime
"""
return self._properties['date_updated']
@property
def verification_attempts(self):
"""
:returns: The number of attempts made to verify ownership of the phone number.
:rtype: unicode
"""
return self._properties['verification_attempts']
@property
def email(self):
"""
:returns: Email.
:rtype: unicode
"""
return self._properties['email']
@property
def cc_emails(self):
"""
:returns: A list of emails.
:rtype: list[unicode]
"""
return self._properties['cc_emails']
@property
def verification_type(self):
"""
:returns: The method used for verifying ownership of the number to be hosted.
:rtype: DependentHostedNumberOrderInstance.VerificationType
"""
return self._properties['verification_type']
@property
def verification_document_sid(self):
"""
:returns: Verification Document Sid.
:rtype: unicode
"""
return self._properties['verification_document_sid']
@property
def extension(self):
"""
:returns: Phone extension to use for ownership verification call.
:rtype: unicode
"""
return self._properties['extension']
@property
def call_delay(self):
"""
:returns: Seconds (0-30) to delay ownership verification call by.
:rtype: unicode
"""
return self._properties['call_delay']
@property
def verification_code(self):
"""
:returns: The digits passed during the ownership verification call.
:rtype: unicode
"""
return self._properties['verification_code']
@property
def verification_call_sids(self):
"""
:returns: List of IDs for ownership verification calls.
:rtype: list[unicode]
"""
return self._properties['verification_call_sids']
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.HostedNumbers.DependentHostedNumberOrderInstance>'
|
|
"""
Support for the `MAF`_ multiple sequence alignment format used by `multiz`_.
.. _MAF: http://genome.ucsc.edu/FAQ/FAQformat.html#format5
.. _multiz: http://www.bx.psu.edu/miller_lab/
"""
from io import (
StringIO,
TextIOWrapper,
)
from bx import interval_index_file
from bx.align import (
Alignment,
Component
)
MAF_INVERSE_STATUS = 'V'
MAF_INSERT_STATUS = 'I'
MAF_CONTIG_STATUS = 'C'
MAF_CONTIG_NESTED_STATUS = 'c'
MAF_NEW_STATUS = 'N'
MAF_NEW_NESTED_STATUS = 'n'
MAF_MAYBE_NEW_STATUS = 'S'
MAF_MAYBE_NEW_NESTED_STATUS = 's'
MAF_MISSING_STATUS = 'M'
class MAFIndexedAccess(interval_index_file.AbstractIndexedAccess):
"""
Indexed access to a MAF file.
"""
def read_at_current_offset(self, file, **kwargs):
"""
Read the MAF block at the current position in `file` and return an
instance of `Alignment`.
"""
return read_next_maf(file, **kwargs)
def open_data(self):
data = super().open_data()
return TextIOWrapper(data, encoding="ascii")
class MAFMultiIndexedAccess(interval_index_file.AbstractMultiIndexedAccess):
"""
Indexed access to multiple MAF files.
"""
indexed_access_class = MAFIndexedAccess
Indexed = MAFIndexedAccess
"""Deprecated: `MAFIndexedAccess` is also available under the name `Indexed`."""
MultiIndexed = MAFMultiIndexedAccess
"""Deprecated: `MAFMultiIndexedAccess` is also available under the name `MultiIndexed`."""
class Reader:
"""
Iterate over all maf blocks in a file in order
"""
def __init__(self, file, **kwargs):
self.file = file
self.maf_kwargs = kwargs
# Read and verify maf header, store any attributes
fields = self.file.readline().split()
if fields[0] != '##maf':
raise Exception("File does not have MAF header")
self.attributes = parse_attributes(fields[1:])
def __next__(self):
return read_next_maf(self.file, **self.maf_kwargs)
def __iter__(self):
return ReaderIter(self)
def close(self):
self.file.close()
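# A usage sketch for Reader (the file name is illustrative). Iteration stops
# once read_next_maf() returns None at end of file:
#
#   with open("alignments.maf") as f:
#       for block in Reader(f):
#           print(block.score, [c.src for c in block.components])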
class ReaderIter:
"""
Adapts a `Reader` to the iterator protocol.
"""
def __init__(self, reader):
self.reader = reader
def __iter__(self):
return self
def __next__(self):
v = next(self.reader)
if not v:
raise StopIteration
return v
class Writer:
def __init__(self, file, attributes=None):
if attributes is None:
attributes = {}
self.file = file
        # Write header; Webb's maf code wants version first, so we accommodate
if 'version' not in attributes:
attributes['version'] = 1
self.file.write("##maf version=%s" % attributes['version'])
for key in attributes:
if key == 'version':
continue
            self.file.write(" {}={}".format(key, attributes[key]))
self.file.write("\n")
def write(self, alignment):
self.file.write("a score=" + str(alignment.score))
for key in alignment.attributes:
self.file.write(" {}={}".format(key, alignment.attributes[key]))
self.file.write("\n")
# Components
rows = []
for c in alignment.components:
# "Empty component" generates an 'e' row
if c.empty:
rows.append(("e", c.src, str(c.start), str(c.size), c.strand, str(c.src_size), c.synteny_empty))
continue
# Regular component
rows.append(("s", c.src, str(c.start), str(c.size), c.strand, str(c.src_size), c.text))
# If component has quality, write a q row
if c.quality is not None:
rows.append(("q", c.src, "", "", "", "", c.quality))
# If component has synteny follow up with an 'i' row
if c.synteny_left and c.synteny_right:
rows.append(("i", c.src, "", "", "", "", " ".join(map(str, c.synteny_left + c.synteny_right))))
self.file.write(format_tabular(rows, "llrrrrl"))
self.file.write("\n")
def close(self):
self.file.close()
# ---- Helper methods -------------------------------------------------------
def from_string(string, **kwargs):
return read_next_maf(StringIO(string), **kwargs)
def read_next_maf(file, species_to_lengths=None, parse_e_rows=False):
"""
Read the next MAF block from `file` and return as an `Alignment`
instance. If `parse_e_rows` is true, empty components will be created
when e rows are encountered.
"""
alignment = Alignment(species_to_lengths=species_to_lengths)
# Attributes line
line = readline(file, skip_blank=True)
if not line:
return None
fields = line.split()
if fields[0] != 'a':
raise Exception("Expected 'a ...' line")
alignment.attributes = parse_attributes(fields[1:])
if 'score' in alignment.attributes:
alignment.score = alignment.attributes['score']
del alignment.attributes['score']
else:
alignment.score = 0
# Sequence lines
last_component = None
while True:
line = readline(file)
# EOF or Blank line terminates alignment components
        if not line or line.isspace():
            break
# Parse row
fields = line.split()
if fields[0] == 's':
# An 's' row contains sequence for a component
component = Component()
component.src = fields[1]
component.start = int(fields[2])
component.size = int(fields[3])
component.strand = fields[4]
component.src_size = int(fields[5])
if len(fields) > 6:
component.text = fields[6].strip()
# Add to set
alignment.add_component(component)
last_component = component
elif fields[0] == 'e':
# An 'e' row, when no bases align for a given species this tells
# us something about the synteny
if parse_e_rows:
component = Component()
component.empty = True
component.src = fields[1]
component.start = int(fields[2])
component.size = int(fields[3])
component.strand = fields[4]
component.src_size = int(fields[5])
component.text = None
synteny = fields[6].strip()
assert len(synteny) == 1, \
"Synteny status in 'e' rows should be denoted with a single character code"
component.synteny_empty = synteny
alignment.add_component(component)
last_component = component
elif fields[0] == 'i':
# An 'i' row, indicates left and right synteny status for the
# previous component, we hope ;)
assert fields[1] == last_component.src, "'i' row does not follow matching 's' row"
last_component.synteny_left = (fields[2], int(fields[3]))
last_component.synteny_right = (fields[4], int(fields[5]))
elif fields[0] == 'q':
assert fields[1] == last_component.src, "'q' row does not follow matching 's' row"
# TODO: Should convert this to an integer array?
last_component.quality = fields[2]
return alignment
def readline(file, skip_blank=False):
"""Read a line from provided file, skipping any blank or comment lines"""
while True:
line = file.readline()
if not line:
return None
if line[0] != '#' and not (skip_blank and line.isspace()):
return line
def parse_attributes(fields):
"""Parse list of key=value strings into a dict"""
attributes = {}
for field in fields:
pair = field.split('=')
attributes[pair[0]] = pair[1]
return attributes
def format_tabular(rows, align=None):
if len(rows) == 0:
return ""
lengths = [len(col) for col in rows[0]]
for row in rows[1:]:
for i in range(0, len(row)):
lengths[i] = max(lengths[i], len(row[i]))
rval = ""
for row in rows:
for i in range(0, len(row)):
if align and align[i] == "l":
rval += row[i].ljust(lengths[i])
else:
rval += row[i].rjust(lengths[i])
rval += " "
rval += "\n"
return rval
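# Illustrative use of the helpers above (a sketch; the alignment text is made
# up and the species/coordinates are placeholders):
#
#   block = from_string(
#       "a score=23262.0\n"
#       "s hg18.chr7    27578828 38 + 158545518 AAAGGGAATGTTAACCAAATGAATTGTCTCTTACGGTG\n"
#       "s panTro1.chr6 28741140 38 + 161576975 AAAGGGAATGTTAACCAAATGAATTGTCTCTTACGGTG\n"
#   )
#   print(block.score, len(block.components))   # -> 23262.0 2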
|
|
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import glob
import json
import platform
import traceback
import os
import flask
from flask.ext.socketio import join_room, leave_room
from werkzeug import HTTP_STATUS_CODES
import werkzeug.exceptions
from .config import config_value
from .webapp import app, socketio, scheduler
import digits
from digits import dataset, extensions, model, utils, pretrained_model
from digits.log import logger
from digits.utils.routing import request_wants_json
blueprint = flask.Blueprint(__name__, __name__)
@blueprint.route('/index.json', methods=['GET'])
@blueprint.route('/', methods=['GET'])
def home(tab=2):
"""
DIGITS home page
Returns information about each job on the server
Returns JSON when requested:
{
datasets: [{id, name, status},...],
models: [{id, name, status},...]
}
"""
running_datasets = get_job_list(dataset.DatasetJob, True)
completed_datasets = get_job_list(dataset.DatasetJob, False)
running_models = get_job_list(model.ModelJob, True)
completed_models = get_job_list(model.ModelJob, False)
if request_wants_json():
data = {
'version': digits.__version__,
'jobs_dir': config_value('jobs_dir'),
'datasets': [j.json_dict()
for j in running_datasets + completed_datasets],
'models': [j.json_dict()
for j in running_models + completed_models],
}
if config_value('server_name'):
data['server_name'] = config_value('server_name')
return flask.jsonify(data)
else:
new_dataset_options = {
'Images': {
'image-classification': {
'title': 'Classification',
'url': flask.url_for(
'digits.dataset.images.classification.views.new'),
},
'image-other': {
'title': 'Other',
'url': flask.url_for(
'digits.dataset.images.generic.views.new'),
},
},
}
new_model_options = {
'Images': {
'image-classification': {
'title': 'Classification',
'url': flask.url_for(
'digits.model.images.classification.views.new'),
},
'image-other': {
'title': 'Other',
'url': flask.url_for(
'digits.model.images.generic.views.new'),
},
},
}
load_model_options = {
'Images': {
'pretrained-model': {
'title': 'Upload Pretrained Model',
'id': 'uploadPretrainedModel',
'url': flask.url_for(
'digits.pretrained_model.views.new'),
},
},
}
# add dataset options for known dataset extensions
data_extensions = extensions.data.get_extensions()
for extension in data_extensions:
ext_category = extension.get_category()
ext_title = extension.get_title()
ext_id = extension.get_id()
if ext_category not in new_dataset_options:
new_dataset_options[ext_category] = {}
new_dataset_options[ext_category][ext_id] = {
'title': ext_title,
'url': flask.url_for(
'digits.dataset.generic.views.new',
extension_id=ext_id),
}
if ext_category not in new_model_options:
new_model_options[ext_category] = {}
new_model_options[ext_category][ext_id] = {
'title': ext_title,
'url': flask.url_for(
'digits.model.images.generic.views.new',
extension_id=ext_id),
}
return flask.render_template(
'home.html',
tab=tab,
new_dataset_options=new_dataset_options,
running_datasets=running_datasets,
completed_datasets=completed_datasets,
new_model_options=new_model_options,
running_models=running_models,
completed_models=completed_models,
load_model_options=load_model_options,
total_gpu_count=len(scheduler.resources['gpus']),
remaining_gpu_count=sum(r.remaining()
for r in scheduler.resources['gpus']),
)
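# A small illustrative client for the JSON form of the home page (not part of
# DIGITS itself; host and port are assumptions for a default dev server):
#
#   import requests
#   info = requests.get('http://localhost:5000/index.json').json()
#   print(info['version'], len(info['datasets']), len(info['models']))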
def json_dict(job, model_output_fields):
d = {
'id': job.id(),
'name': job.name(),
'group': job.group,
'status': job.status_of_tasks().name,
'status_css': job.status_of_tasks().css,
'submitted': job.status_history[0][1],
'elapsed': job.runtime_of_tasks(),
}
if 'train_db_task' in dir(job):
d.update({
'backend': job.train_db_task().backend,
})
if 'train_task' in dir(job):
d.update({
'framework': job.train_task().get_framework_id(),
})
for prefix, outputs in (('train', job.train_task().train_outputs),
('val', job.train_task().val_outputs)):
for key in outputs.keys():
data = outputs[key].data
if len(data) > 0:
key = '%s (%s) ' % (key, prefix)
model_output_fields.add(key + 'last')
model_output_fields.add(key + 'min')
model_output_fields.add(key + 'max')
d.update({key + 'last': data[-1]})
d.update({key + 'min': min(data)})
d.update({key + 'max': max(data)})
if (job.train_task().combined_graph_data() and
'columns' in job.train_task().combined_graph_data()):
d.update({
'sparkline': job.train_task().combined_graph_data()['columns'][0][1:],
})
if 'get_progress' in dir(job):
d.update({
'progress': int(round(100*job.get_progress())),
})
if hasattr(job, 'dataset_id'):
d.update({
'dataset_id': job.dataset_id,
})
if isinstance(job, dataset.DatasetJob):
d.update({ 'type': 'dataset' })
if isinstance(job, model.ModelJob):
d.update({ 'type': 'model' })
if isinstance(job, pretrained_model.PretrainedModelJob):
model_output_fields.add("has_labels")
model_output_fields.add("username")
d.update({
'type': 'pretrained_model',
'framework': job.framework,
'username': job.username,
'has_labels': job.has_labels_file()
})
return d
@blueprint.route('/completed_jobs.json', methods=['GET'])
def completed_jobs():
"""
Returns JSON
{
datasets: [{id, name, group, status, status_css, submitted, elapsed, badge}],
models: [{id, name, group, status, status_css, submitted, elapsed, badge}],
}
"""
completed_datasets = get_job_list(dataset.DatasetJob, False)
completed_models = get_job_list(model.ModelJob, False)
running_datasets = get_job_list(dataset.DatasetJob, True)
running_models = get_job_list(model.ModelJob, True)
pretrained_models = get_job_list(pretrained_model.PretrainedModelJob,False)
model_output_fields = set()
data = {
'running': [json_dict(j, model_output_fields) for j in running_datasets + running_models],
'datasets': [json_dict(j, model_output_fields) for j in completed_datasets],
'models': [json_dict(j, model_output_fields) for j in completed_models],
'pretrained_models': [json_dict(j, model_output_fields) for j in pretrained_models],
'model_output_fields': sorted(list(model_output_fields)),
}
return flask.jsonify(data)
@blueprint.route('/jobs/<job_id>/table_data.json', methods=['GET'])
def job_table_data(job_id):
"""
Get the job data for the front page tables
"""
job = scheduler.get_job(job_id)
if job is None:
raise werkzeug.exceptions.NotFound('Job not found')
model_output_fields = set()
return flask.jsonify({'job': json_dict(job, model_output_fields)})
def get_job_list(cls, running):
return sorted(
[j for j in scheduler.jobs.values() if isinstance(j, cls) and j.status.is_running() == running],
key=lambda j: j.status_history[0][1],
reverse=True,
)
@blueprint.route('/group', methods=['GET','POST'])
def group():
"""
Assign the group for the listed jobs
"""
not_found = 0
forbidden = 0
group_name = utils.routing.get_request_arg('group_name').strip()
job_ids = flask.request.form.getlist('job_ids[]')
error = []
for job_id in job_ids:
try:
job = scheduler.get_job(job_id)
if job is None:
logger.warning('Job %s not found for group assignment.' % job_id)
not_found += 1
continue
if not utils.auth.has_permission(job, 'edit'):
logger.warning('Group assignment not permitted for job %s' % job_id)
forbidden += 1
continue
job.group = group_name
# update form data so updated name gets used when cloning job
if hasattr(job, 'form_data'):
job.form_data['form.group_name.data'] = job.group
job.emit_attribute_changed('group', job.group)
        except Exception as e:
            error.append(str(e))
if not_found:
error.append('%d job%s not found.' % (not_found, '' if not_found == 1 else 's'))
if forbidden:
error.append('%d job%s not permitted to be regrouped.' % (forbidden, '' if forbidden == 1 else 's'))
if len(error) > 0:
error = ' '.join(error)
raise werkzeug.exceptions.BadRequest(error)
return 'Jobs regrouped.'
### Authentication/login
@blueprint.route('/login', methods=['GET','POST'])
def login():
"""
Ask for a username (no password required)
Sets a cookie
"""
# Get the URL to redirect to after logging in
next_url = utils.routing.get_request_arg('next') or \
flask.request.referrer or flask.url_for('.home')
if flask.request.method == 'GET':
return flask.render_template('login.html', next=next_url)
# Validate username
username = utils.routing.get_request_arg('username').strip()
try:
utils.auth.validate_username(username)
except ValueError as e:
# Invalid username
flask.flash(e.message, 'danger')
return flask.render_template('login.html', next=next_url)
# Valid username
response = flask.make_response(flask.redirect(next_url))
response.set_cookie('username', username)
return response
@blueprint.route('/logout', methods=['GET','POST'])
def logout():
"""
Unset the username cookie
"""
next_url = utils.routing.get_request_arg('next') or \
flask.request.referrer or flask.url_for('.home')
response = flask.make_response(flask.redirect(next_url))
response.set_cookie('username', '', expires=0)
return response
### Jobs routes
@blueprint.route('/jobs/<job_id>', methods=['GET'])
def show_job(job_id):
"""
Redirects to the appropriate /datasets/ or /models/ page
"""
job = scheduler.get_job(job_id)
if job is None:
raise werkzeug.exceptions.NotFound('Job not found')
if isinstance(job, dataset.DatasetJob):
return flask.redirect(flask.url_for('digits.dataset.views.show', job_id=job_id))
if isinstance(job, model.ModelJob):
return flask.redirect(flask.url_for('digits.model.views.show', job_id=job_id))
if isinstance(job, pretrained_model.PretrainedModelJob):
return flask.redirect(flask.url_for('digits.pretrained_model.views.show', job_id=job_id))
else:
raise werkzeug.exceptions.BadRequest('Invalid job type')
@blueprint.route('/jobs/<job_id>', methods=['PUT'])
@utils.auth.requires_login(redirect=False)
def edit_job(job_id):
"""
Edit a job's name and/or notes
"""
job = scheduler.get_job(job_id)
if job is None:
raise werkzeug.exceptions.NotFound('Job not found')
if not utils.auth.has_permission(job, 'edit'):
raise werkzeug.exceptions.Forbidden()
# Edit name
if 'job_name' in flask.request.form:
name = flask.request.form['job_name'].strip()
if not name:
raise werkzeug.exceptions.BadRequest('name cannot be blank')
job._name = name
job.emit_attribute_changed('name', job.name())
# update form data so updated name gets used when cloning job
if 'form.dataset_name.data' in job.form_data:
job.form_data['form.dataset_name.data'] = name
elif 'form.model_name.data' in job.form_data:
job.form_data['form.model_name.data'] = name
else:
# we are utterly confused
raise werkzeug.exceptions.BadRequest('Unable to edit job type %s' % job.job_type())
logger.info('Set name to "%s".' % job.name(), job_id=job.id())
# Edit notes
if 'job_notes' in flask.request.form:
notes = flask.request.form['job_notes'].strip()
if not notes:
notes = None
job._notes = notes
logger.info('Updated notes.', job_id=job.id())
return '%s updated.' % job.job_type()
@blueprint.route('/datasets/<job_id>/status', methods=['GET'])
@blueprint.route('/models/<job_id>/status', methods=['GET'])
@blueprint.route('/jobs/<job_id>/status', methods=['GET'])
def job_status(job_id):
"""
Returns a JSON objecting representing the status of a job
"""
job = scheduler.get_job(job_id)
result = {}
if job is None:
result['error'] = 'Job not found.'
else:
result['error'] = None
result['status'] = job.status.name
result['name'] = job.name()
result['type'] = job.job_type()
return json.dumps(result)
@blueprint.route('/pretrained_models/<job_id>', methods=['DELETE'])
@blueprint.route('/datasets/<job_id>', methods=['DELETE'])
@blueprint.route('/models/<job_id>', methods=['DELETE'])
@blueprint.route('/jobs/<job_id>', methods=['DELETE'])
@utils.auth.requires_login(redirect=False)
def delete_job(job_id):
"""
Deletes a job
"""
job = scheduler.get_job(job_id)
if job is None:
raise werkzeug.exceptions.NotFound('Job not found')
if not utils.auth.has_permission(job, 'delete'):
raise werkzeug.exceptions.Forbidden()
try:
if scheduler.delete_job(job_id):
return 'Job deleted.'
else:
raise werkzeug.exceptions.Forbidden('Job not deleted')
except utils.errors.DeleteError as e:
raise werkzeug.exceptions.Forbidden(str(e))
@blueprint.route('/jobs', methods=['DELETE'])
@utils.auth.requires_login(redirect=False)
def delete_jobs():
"""
Deletes a list of jobs
"""
not_found = 0
forbidden = 0
failed = 0
job_ids = flask.request.form.getlist('job_ids[]')
error = []
for job_id in job_ids:
try:
job = scheduler.get_job(job_id)
if job is None:
not_found += 1
continue
if not utils.auth.has_permission(job, 'delete'):
forbidden += 1
continue
if not scheduler.delete_job(job_id):
failed += 1
continue
except Exception as e:
error.append(str(e))
pass
if not_found:
error.append('%d job%s not found.' % (not_found, '' if not_found == 1 else 's'))
if forbidden:
error.append('%d job%s not permitted to be deleted.' % (forbidden, '' if forbidden == 1 else 's'))
if failed:
error.append('%d job%s failed to delete.' % (failed, '' if failed == 1 else 's'))
if len(error) > 0:
error = ' '.join(error)
raise werkzeug.exceptions.BadRequest(error)
return 'Jobs deleted.'
@blueprint.route('/abort_jobs', methods=['POST'])
@utils.auth.requires_login(redirect=False)
def abort_jobs():
"""
Aborts a list of jobs
"""
not_found = 0
forbidden = 0
failed = 0
    job_ids = flask.request.form.getlist('job_ids[]')
    error = []
for job_id in job_ids:
try:
job = scheduler.get_job(job_id)
if job is None:
not_found += 1
continue
if not utils.auth.has_permission(job, 'abort'):
forbidden += 1
continue
if not scheduler.abort_job(job_id):
failed += 1
continue
except Exception as e:
            error.append(str(e))
if not_found:
error.append('%d job%s not found.' % (not_found, '' if not_found == 1 else 's'))
if forbidden:
error.append('%d job%s not permitted to be aborted.' % (forbidden, '' if forbidden == 1 else 's'))
if failed:
error.append('%d job%s failed to abort.' % (failed, '' if failed == 1 else 's'))
if len(error) > 0:
error = ' '.join(error)
raise werkzeug.exceptions.BadRequest(error)
return 'Jobs aborted.'
@blueprint.route('/datasets/<job_id>/abort', methods=['POST'])
@blueprint.route('/models/<job_id>/abort', methods=['POST'])
@blueprint.route('/jobs/<job_id>/abort', methods=['POST'])
@utils.auth.requires_login(redirect=False)
def abort_job(job_id):
"""
Aborts a running job
"""
job = scheduler.get_job(job_id)
if job is None:
raise werkzeug.exceptions.NotFound('Job not found')
if scheduler.abort_job(job_id):
return 'Job aborted.'
else:
raise werkzeug.exceptions.Forbidden('Job not aborted')
@blueprint.route('/clone/<clone>', methods=['POST', 'GET'])
@utils.auth.requires_login
def clone_job(clone):
"""
Clones a job with the id <clone>, populating the creation page with data saved in <clone>
"""
## <clone> is the job_id to clone
job = scheduler.get_job(clone)
if job is None:
raise werkzeug.exceptions.NotFound('Job not found')
if isinstance(job, dataset.GenericDatasetJob):
return flask.redirect(flask.url_for('digits.dataset.generic.views.new', extension_id=job.extension_id) + '?clone=' + clone)
if isinstance(job, dataset.ImageClassificationDatasetJob):
return flask.redirect(flask.url_for('digits.dataset.images.classification.views.new') + '?clone=' + clone)
if isinstance(job, dataset.GenericImageDatasetJob):
return flask.redirect(flask.url_for('digits.dataset.images.generic.views.new') + '?clone=' + clone)
if isinstance(job, model.ImageClassificationModelJob):
return flask.redirect(flask.url_for('digits.model.images.classification.views.new') + '?clone=' + clone)
if isinstance(job, model.GenericImageModelJob):
return flask.redirect(flask.url_for('digits.model.images.generic.views.new') + '?clone=' + clone)
else:
raise werkzeug.exceptions.BadRequest('Invalid job type')
### Error handling
@app.errorhandler(Exception)
def handle_error(e):
"""
Handle errors, formatting them as JSON if requested
"""
error_type = type(e).__name__
message = str(e)
trace = None
description = None
status_code = 500
if isinstance(e, werkzeug.exceptions.HTTPException):
status_code = e.code
description = e.description
if app.debug:
trace = traceback.format_exc()
if request_wants_json():
details = {
'message': message,
'type': error_type,
}
if description is not None:
details['description'] = description
if trace is not None:
details['trace'] = trace.split('\n')
return flask.jsonify({'error': details}), status_code
else:
return flask.render_template('error.html',
title = error_type,
message = message,
description = description,
trace = trace,
), status_code
# Register this handler for all error codes
# Necessary for flask<=0.10.1
for code in HTTP_STATUS_CODES:
if code not in [301]:
app.register_error_handler(code, handle_error)
### File serving
@blueprint.route('/files/<path:path>', methods=['GET'])
def serve_file(path):
"""
Return a file in the jobs directory
If you install the nginx.site file, nginx will serve files instead
and this path will never be used
"""
jobs_dir = config_value('jobs_dir')
return flask.send_from_directory(jobs_dir, path)
### Path Completion
@blueprint.route('/autocomplete/path', methods=['GET'])
def path_autocomplete():
"""
Return a list of paths matching the specified preamble
"""
path = flask.request.args.get('query','')
    if not os.path.isabs(path):
# Only allow absolute paths by prepending forward slash
path = os.path.sep + path
suggestions = [os.path.abspath(p) for p in glob.glob(path+"*")]
if platform.system() == 'Windows':
        # on Windows, convert backslashes to forward slashes
suggestions = [p.replace('\\', '/') for p in suggestions]
result = {
"suggestions": sorted(suggestions)
}
return json.dumps(result)
### SocketIO functions
## /home
@socketio.on('connect', namespace='/home')
def on_connect_home():
"""
Somebody connected to the homepage
"""
pass
@socketio.on('disconnect', namespace='/home')
def on_disconnect_home():
"""
Somebody disconnected from the homepage
"""
pass
## /jobs
@socketio.on('connect', namespace='/jobs')
def on_connect_jobs():
"""
Somebody connected to a jobs page
"""
pass
@socketio.on('disconnect', namespace='/jobs')
def on_disconnect_jobs():
"""
Somebody disconnected from a jobs page
"""
pass
@socketio.on('join', namespace='/jobs')
def on_join_jobs(data):
"""
Somebody joined a room
"""
room = data['room']
join_room(room)
flask.session['room'] = room
@socketio.on('leave', namespace='/jobs')
def on_leave_jobs():
"""
Somebody left a room
"""
if 'room' in flask.session:
room = flask.session['room']
del flask.session['room']
#print '>>> Somebody left room %s' % room
leave_room(room)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
# pylint: disable=fixme, missing-docstring
import subprocess
from subprocess import call, check_output
DOCUMENTATION = '''
---
module: os_firewall_manage_iptables
short_description: This module manages iptables rules for a given chain
author: Jason DeTiberus
requirements: [ ]
'''
EXAMPLES = '''
'''
class IpTablesError(Exception):
def __init__(self, msg, cmd, exit_code, output):
super(IpTablesError, self).__init__(msg)
self.msg = msg
self.cmd = cmd
self.exit_code = exit_code
self.output = output
class IpTablesAddRuleError(IpTablesError):
pass
class IpTablesRemoveRuleError(IpTablesError):
pass
class IpTablesSaveError(IpTablesError):
pass
class IpTablesCreateChainError(IpTablesError):
def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long
super(IpTablesCreateChainError, self).__init__(msg, cmd, exit_code,
output)
self.chain = chain
class IpTablesCreateJumpRuleError(IpTablesError):
def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long
super(IpTablesCreateJumpRuleError, self).__init__(msg, cmd, exit_code,
output)
self.chain = chain
# TODO: implement rollbacks for any events that were successful before an
# exception was thrown. For example, when the chain is created
# successfully, but the add/remove rule fails.
class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
def __init__(self, module):
self.module = module
self.ip_version = module.params['ip_version']
self.check_mode = module.check_mode
self.chain = module.params['chain']
self.create_jump_rule = module.params['create_jump_rule']
self.jump_rule_chain = module.params['jump_rule_chain']
self.cmd = self.gen_cmd()
self.save_cmd = self.gen_save_cmd()
self.output = []
self.changed = False
def save(self):
try:
self.output.append(check_output(self.save_cmd,
stderr=subprocess.STDOUT))
except subprocess.CalledProcessError as ex:
raise IpTablesSaveError(
msg="Failed to save iptables rules",
cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)
def verify_chain(self):
if not self.chain_exists():
self.create_chain()
if self.create_jump_rule and not self.jump_rule_exists():
self.create_jump()
def add_rule(self, port, proto):
rule = self.gen_rule(port, proto)
if not self.rule_exists(rule):
self.verify_chain()
if self.check_mode:
self.changed = True
self.output.append("Create rule for %s %s" % (proto, port))
else:
cmd = self.cmd + ['-A'] + rule
try:
self.output.append(check_output(cmd))
self.changed = True
self.save()
except subprocess.CalledProcessError as ex:
raise IpTablesCreateChainError(
chain=self.chain,
msg="Failed to create rule for "
"%s %s" % (proto, port),
cmd=ex.cmd, exit_code=ex.returncode,
output=ex.output)
def remove_rule(self, port, proto):
rule = self.gen_rule(port, proto)
if self.rule_exists(rule):
if self.check_mode:
self.changed = True
self.output.append("Remove rule for %s %s" % (proto, port))
else:
cmd = self.cmd + ['-D'] + rule
try:
self.output.append(check_output(cmd))
self.changed = True
self.save()
except subprocess.CalledProcessError as ex:
raise IpTablesRemoveRuleError(
chain=self.chain,
msg="Failed to remove rule for %s %s" % (proto, port),
cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)
def rule_exists(self, rule):
check_cmd = self.cmd + ['-C'] + rule
return True if call(check_cmd) == 0 else False
def gen_rule(self, port, proto):
return [self.chain, '-p', proto, '-m', 'state', '--state', 'NEW',
'-m', proto, '--dport', str(port), '-j', 'ACCEPT']
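    # For example (illustrative values), gen_rule(8443, 'tcp') with the default
    # chain returns:
    #   ['OS_FIREWALL_ALLOW', '-p', 'tcp', '-m', 'state', '--state', 'NEW',
    #    '-m', 'tcp', '--dport', '8443', '-j', 'ACCEPT']
    # which add_rule()/remove_rule() prefix with ['/usr/sbin/iptables', '-A']
    # or ['/usr/sbin/iptables', '-D'] to build the full command.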
def create_jump(self):
if self.check_mode:
self.changed = True
self.output.append("Create jump rule for chain %s" % self.chain)
else:
try:
cmd = self.cmd + ['-L', self.jump_rule_chain, '--line-numbers']
output = check_output(cmd, stderr=subprocess.STDOUT)
# break the input rules into rows and columns
input_rules = [s.split() for s in output.split('\n')]
# Find the last numbered rule
last_rule_num = None
last_rule_target = None
for rule in input_rules[:-1]:
if rule:
try:
last_rule_num = int(rule[0])
except ValueError:
continue
last_rule_target = rule[1]
                # Naively assume that if the last row is a REJECT rule, then
                # we can insert our jump rule right before it; otherwise we
                # assume that we can just append the rule.
if (last_rule_num and last_rule_target
and last_rule_target == 'REJECT'):
# insert rule
cmd = self.cmd + ['-I', self.jump_rule_chain,
str(last_rule_num)]
else:
# append rule
cmd = self.cmd + ['-A', self.jump_rule_chain]
cmd += ['-j', self.chain]
output = check_output(cmd, stderr=subprocess.STDOUT)
self.changed = True
self.output.append(output)
self.save()
except subprocess.CalledProcessError as ex:
if '--line-numbers' in ex.cmd:
raise IpTablesCreateJumpRuleError(
chain=self.chain,
msg=("Failed to query existing " +
self.jump_rule_chain +
" rules to determine jump rule location"),
cmd=ex.cmd, exit_code=ex.returncode,
output=ex.output)
else:
raise IpTablesCreateJumpRuleError(
chain=self.chain,
msg=("Failed to create jump rule for chain " +
self.chain),
cmd=ex.cmd, exit_code=ex.returncode,
output=ex.output)
def create_chain(self):
if self.check_mode:
self.changed = True
self.output.append("Create chain %s" % self.chain)
else:
try:
cmd = self.cmd + ['-N', self.chain]
self.output.append(check_output(cmd,
stderr=subprocess.STDOUT))
self.changed = True
self.output.append("Successfully created chain %s" %
self.chain)
self.save()
except subprocess.CalledProcessError as ex:
raise IpTablesCreateChainError(
chain=self.chain,
msg="Failed to create chain: %s" % self.chain,
cmd=ex.cmd, exit_code=ex.returncode, output=ex.output
)
def jump_rule_exists(self):
cmd = self.cmd + ['-C', self.jump_rule_chain, '-j', self.chain]
return True if call(cmd) == 0 else False
def chain_exists(self):
cmd = self.cmd + ['-L', self.chain]
return True if call(cmd) == 0 else False
def gen_cmd(self):
cmd = 'iptables' if self.ip_version == 'ipv4' else 'ip6tables'
return ["/usr/sbin/%s" % cmd]
def gen_save_cmd(self): # pylint: disable=no-self-use
return ['/usr/libexec/iptables/iptables.init', 'save']
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
action=dict(required=True, choices=['add', 'remove',
'verify_chain']),
chain=dict(required=False, default='OS_FIREWALL_ALLOW'),
create_jump_rule=dict(required=False, type='bool', default=True),
jump_rule_chain=dict(required=False, default='INPUT'),
protocol=dict(required=False, choices=['tcp', 'udp']),
port=dict(required=False, type='int'),
ip_version=dict(required=False, default='ipv4',
choices=['ipv4', 'ipv6']),
),
supports_check_mode=True
)
action = module.params['action']
protocol = module.params['protocol']
port = module.params['port']
if action in ['add', 'remove']:
if not protocol:
error = "protocol is required when action is %s" % action
module.fail_json(msg=error)
if not port:
error = "port is required when action is %s" % action
module.fail_json(msg=error)
iptables_manager = IpTablesManager(module)
try:
if action == 'add':
iptables_manager.add_rule(port, protocol)
elif action == 'remove':
iptables_manager.remove_rule(port, protocol)
elif action == 'verify_chain':
iptables_manager.verify_chain()
except IpTablesError as ex:
module.fail_json(msg=ex.msg)
return module.exit_json(changed=iptables_manager.changed,
output=iptables_manager.output)
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|