gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
import logging
import logging.config
LOG = logging.getLogger(__name__)

# The table is drawn with simple ASCII placeholder characters which are
# mapped onto Unicode box-drawing glyphs via str.translate():
#   / - v \ + ^ | > < ` * :  ->  box corners, tees, lines and dotted bar.
simple_ascii = "/-v\\+^|><`*:"
fancy_unicode = "\u250C\u2500\u252C\u2510\u253C\u2534\u2502\u251C\u2524\u2514\u2518\u250A"
try:
    # Python 3: str.maketrans builds a char-for-char translation table.
    trans = "".maketrans(simple_ascii, fancy_unicode)
except AttributeError:
    import string
    # Python 2 has no str.maketrans; fall back to an identity table so the
    # output stays plain ASCII there.
    trans = string.maketrans(simple_ascii, simple_ascii)
class TableView(object):
    """Hierarchical text-table renderer ("turtles all the way down").

    Each TableView is a column that may hold nested sub-columns; the root
    instance renders the whole table via __str__.  `link` names the
    dictionary key this column reads from each data row; `width` is either
    an integer or "auto" (computed by layout()).
    """

    def __init__(self, title=None, width="auto", center=False, link=None):
        self.columns = []
        self.title = title
        # "auto" means layout() derives the width from content.
        self.autowidth = width == "auto"
        self.width = width
        self.center = center
        self.link = link
        self.data = []

    def set_format(self):
        """(Re)build format strings from the current width."""
        if self.center:
            self.title_format = "{:^%i.%i}" % (self.width, self.width)
        else:
            self.title_format = "{:>%i.%i}" % (self.width, self.width)
        self.data_format = "{:>%i.%i}" % (self.width, self.width)

    def get_height(self):
        """Number of header rows needed by the nested column titles."""
        height = 0
        for column in self.columns:
            height = max(1 + column.get_height(), height)
        return height

    def get_title(self, depth=0):
        """Return this column's title (depth 0) or its children's joined titles."""
        self.set_format()
        if depth == 0:
            return self.title_format.format(self.title)
        title = []
        for column in self.columns:
            title.append(column.get_title(depth=depth - 1))
        return self.title_format.format(':'.join(title))

    def get_value(self, data):
        """Render one data row for this column and its nested columns."""
        self.set_format()
        value = []
        if self.link and self.link in data:
            value.append(str(data[self.link]))
        elif self.link:
            LOG.debug('could not find %s in %s', self.link, data.keys())
        else:
            LOG.debug('no link for %s', self.title)
        for column in self.columns:
            v = column.get_value(data)
            if v:
                value.append(str(v))
        # FIX: a redundant second set_format() call was removed here; the
        # formats were already built at the top of this method.
        return self.data_format.format(":".translate(trans).join(value))

    def layout(self, data):
        """Compute widths bottom-up from nested columns and data values."""
        autowidth = 0
        for column in self.columns:
            column.layout(data)
            autowidth += column.width
        if self.columns:
            # One separator character between adjacent sub-columns.
            autowidth += len(self.columns) - 1
        if autowidth == 0:
            # Leaf column: width is the widest linked value in the data.
            for data_dict in data:
                if self.link in data_dict:
                    autowidth = max(autowidth, len(str(data_dict[self.link])))
                    LOG.debug(
                        "found %s == %s (%s)",
                        self.link, data_dict[self.link],
                        autowidth
                    )
                else:
                    LOG.debug(
                        "could not find %s in %s",
                        self.link, data_dict.keys()
                    )
        # Never narrower than 3 characters (comment previously said 5,
        # which disagreed with the code).
        autowidth = max(autowidth, 3)
        if self.autowidth:
            self.width = autowidth

    def add_column(self, column):
        """Append a single nested column."""
        self.columns.append(column)

    def add_columns(self, columns):
        """Append several nested columns, in order."""
        for column in columns:
            self.add_column(column)

    def makebar(self, top=False, bottom=False):
        """Build a horizontal rule: top border, bottom border, or separator."""
        if top:
            bar = "/"
        elif bottom:
            bar = "`"
        else:
            # side separator between header and data
            bar = ">"
        for index, col in enumerate(self.columns):
            last = (index + 1) == len(self.columns)
            bar += "-" * col.width
            if top:
                bar += "\\" if last else "v"
            elif bottom:
                bar += "*" if last else "^"
            else:
                bar += "<" if last else "+"
        return bar.translate(trans)

    def set_data(self, data):
        """
        Data is expected to be a list of dictionaries, the keys are 'link' strings.
        """
        self.data = data

    def __str__(self):
        self.layout(self.data)
        vbar = "|".translate(trans)
        out = []
        # Top border.
        out.append(self.makebar(top=True))
        # Header rows, one per level of column nesting.
        height = self.get_height()
        for row in range(height):
            row_string = vbar
            for column in self.columns:
                row_string += column.get_title(depth=row) + vbar
            out.append(row_string.translate(trans))
        out.append(self.makebar())
        # Data rows.
        for data_dict in self.data:
            row_string = vbar
            for column in self.columns:
                row_string += column.get_value(data_dict) + vbar
            out.append(row_string)
        out.append(self.makebar(bottom=True))
        return "\n".join(out)
def main(context, namespace_name):
    """Build a small demo table with nested columns and print it."""
    root = TableView()
    # Leaf columns bound directly to data keys.
    alpha = TableView('alpha', center=True, link="alpha")
    beta = TableView('beta', center=True, link="beta")
    gamma = TableView('gamma', center=True, link="gamma")
    # 'delta' and 'phi' are grouping headers holding nested sub-columns.
    delta = TableView('delta', center=True)
    delta.add_column(TableView('epsilon', center=True, link="epsilon"))
    phi = TableView('phi', center=True)
    phi.add_columns([
        TableView('chi', center=True, link="chi"),
        TableView('psi', center=True, link="psi"),
    ])
    root.add_columns([alpha, beta, gamma, delta, phi])
    # Three demo rows: values look like '_alpha1_', '_beta1_', ...
    rows = []
    for i in (1, 2, 3):
        rows.append({
            key: '_%s%d_' % (key, i)
            for key in ('alpha', 'beta', 'gamma', 'epsilon', 'chi', 'psi')
        })
    root.set_data(rows)
    print(root)
if __name__ == "__main__":
    # NOTE(review): `docopt` is used here but never imported anywhere in
    # this view of the file, and no module usage docstring is visible for
    # docopt to parse — confirm the missing `import docopt` and __doc__
    # against the original file.
    args = docopt.docopt(__doc__)
    context = args['--context']
    namespace = args['--namespace']
    main(context, namespace)
|
|
# toontown.pets.PetGoal
from direct.task import Task
from direct.fsm import FSM, ClassicFSM, State
from direct.showbase.PythonUtil import randFloat, Functor
from direct.directnotify import DirectNotifyGlobal
from toontown.pets import PetConstants
from toontown.toon import DistributedToonAI
class PetGoal(FSM.FSM):
    """Base class for one pet-AI goal.

    Goals are owned by a goal manager; the manager moves a goal between
    'background' (idle) and 'foreground' (currently driving the pet)
    through the internal ClassicFSM built in __init__.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('PetGoal')
    # Class-wide counter handing each goal a unique serial number (and
    # hence a unique done-event name).
    SerialNum = 0

    def __init__(self):
        FSM.FSM.__init__(self, self.__class__.__name__)
        # Filled in by setGoalMgr() / cleared by clearGoalMgr().
        self.goalMgr = None
        self.pet = None
        self.brain = None
        # When truthy, announceDone() also removes and destroys this goal.
        self.removeOnDone = 0
        self.serialNum = PetGoal.SerialNum
        PetGoal.SerialNum += 1
        # off -> background <-> foreground state machine.
        self.fsm = ClassicFSM.ClassicFSM('PetGoalFSM', [State.State('off', self.enterOff, self.exitOff, ['background']), State.State('background', self.enterBackground, self.exitBackground, ['foreground']), State.State('foreground', self.enterForeground, self.exitForeground, ['background'])], 'off', 'off')
        self.fsm.enterInitialState()
        return

    def destroy(self):
        # hasattr guard: destroy() may be called more than once.
        if hasattr(self, 'fsm'):
            self.fsm.requestFinalState()
            del self.fsm
        self.cleanup()

    def _removeSelf(self):
        # Detach from the owning goal manager.
        self.goalMgr.removeGoal(self)

    def getDoneEvent(self):
        # Unique per goal instance thanks to serialNum.
        return 'PetGoalDone-%s' % self.serialNum

    def announceDone(self):
        # NOTE: ordering is deliberate — remove from the manager before the
        # done event fires, destroy only afterwards.
        if self.removeOnDone:
            self._removeSelf()
        messenger.send(self.getDoneEvent())
        if self.removeOnDone:
            self.destroy()

    def setGoalMgr(self, goalMgr):
        # Attach to a goal manager and move into the 'background' state.
        self.goalMgr = goalMgr
        self.pet = goalMgr.pet
        self.brain = self.pet.brain
        self.fsm.request('background')

    def clearGoalMgr(self):
        # Detach from the manager and park the FSM in its final state.
        self.goalMgr = None
        self.pet = None
        self.brain = None
        self.fsm.requestFinalState()
        return

    def getPriority(self):
        # Subclasses override this to compete for the foreground slot.
        return PetConstants.PriorityDefault

    def enterOff(self):
        pass

    def exitOff(self):
        pass

    def enterBackground(self):
        pass

    def exitBackground(self):
        pass

    def enterForeground(self):
        pass

    def exitForeground(self):
        pass

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return '%s: %s' % (self.__class__.__name__, self.getPriority())
class InteractWithAvatar(PetGoal):
    """Chase an avatar and, once the pet is attending to it, interact.

    Subclasses supply the actual interaction via startInteract()/
    stopInteract().
    """
    # Separate counter so each instance gets a unique doLater task name.
    SerialNum = 0

    def __init__(self, avatar):
        PetGoal.__init__(self)
        self.avatar = avatar
        self.serialNum = InteractWithAvatar.SerialNum
        InteractWithAvatar.SerialNum += 1
        self.transitionDoLaterName = '%s-doLater-%s' % (InteractWithAvatar.__name__, self.serialNum)

    def destroy(self):
        PetGoal.destroy(self)
        if hasattr(self, 'avatar'):
            del self.avatar

    def enterForeground(self):
        self.request('Chase')

    def exitForeground(self):
        self.request('Off')

    def enterChase(self):
        PetGoal.notify.debug('enterChase')
        if self.brain.lookingAt(self.avatar.doId):
            # Already looking at the target: hop to 'Interact' on the next
            # task tick rather than transitioning from within enterChase.
            def goToInteract(task = None, self = self):
                self.request('Interact')
                return Task.done
            taskMgr.doMethodLater(0.0001, goToInteract, self.transitionDoLaterName)
        else:
            # Switch to Interact as soon as the brain reports the pet is
            # attending to this avatar; meanwhile, chase it.
            self.accept(self.brain.getObserveEventAttendingAvStart(self.avatar.doId), Functor(self.request, 'Interact'))
            self.brain._chase(self.avatar)
        return

    def exitChase(self):
        self.ignore(self.brain.getObserveEventAttendingAvStart(self.avatar.doId))
        taskMgr.remove(self.transitionDoLaterName)

    def enterInteract(self):
        PetGoal.notify.debug('enterInteract')
        if self._chaseAvInInteractMode():
            # Fall back to chasing if the pet stops attending the avatar.
            self.accept(self.brain.getObserveEventAttendingAvStop(self.avatar.doId), Functor(self.request, 'Chase'))
        self.startInteract()

    def exitInteract(self):
        self.stopInteract()
        self.ignore(self.brain.getObserveEventAttendingAvStop(self.avatar.doId))

    def startInteract(self):
        # Hook for subclasses.
        pass

    def stopInteract(self):
        # Hook for subclasses.
        pass

    def _chaseAvInInteractMode(self):
        # Subclasses may return False to disable chase-while-interacting.
        return True

    def __str__(self):
        return '%s-%s: %s' % (self.__class__.__name__, self.avatar.doId, self.getPriority())
class Wander(PetGoal):
    """Default goal: wander aimlessly while in the foreground."""

    def enterForeground(self):
        # All movement logic lives in the brain.
        self.brain._wander()
class ChaseAvatar(PetGoal):
    """Chase an avatar; priority scales with pet mood and recent attention."""

    def __init__(self, avatar):
        PetGoal.__init__(self)
        self.avatar = avatar
        self.isToon = isinstance(self.avatar, DistributedToonAI.DistributedToonAI)

    def destroy(self):
        PetGoal.destroy(self)
        if hasattr(self, 'avatar'):
            del self.avatar

    def setGoalMgr(self, goalMgr):
        PetGoal.setGoalMgr(self, goalMgr)
        # basePriority only exists after attachment to a goal manager.
        self.basePriority = PetConstants.PriorityChaseAv

    def getPriority(self):
        priority = self.basePriority
        # Hungry pets prefer chasing toons.
        if self.isToon and self.pet.mood.getDominantMood() == 'hunger':
            priority *= PetConstants.HungerChaseToonScale
        # Briefly boost priority right after an interaction with this avatar.
        lastInteractTime = self.brain.lastInteractTime.get(self.avatar.doId)
        if lastInteractTime is not None:
            elapsed = globalClock.getFrameTime() - lastInteractTime
            if elapsed < PetConstants.GettingAttentionGoalScaleDur:
                priority *= PetConstants.GettingAttentionGoalScale
        return priority

    def enterForeground(self):
        self.brain._chase(self.avatar)

    def __str__(self):
        return '%s-%s: %s' % (self.__class__.__name__, self.avatar.doId, self.getPriority())
class ChaseAvatarLeash(PetGoal):
    """Debug goal: keep chasing the avatar with the given doId ('leash')."""

    def __init__(self, avId):
        PetGoal.__init__(self)
        # Unlike the other chase goals, only the doId is stored, not the
        # avatar object itself.
        self.avId = avId

    def getPriority(self):
        return PetConstants.PriorityDebugLeash

    def enterForeground(self):
        av = simbase.air.doId2do.get(self.avId)
        if av:
            self.brain._chase(av)
        else:
            # Avatar is gone; nothing left for this goal to do.
            self._removeSelf()

    def __str__(self):
        # FIX: this class stores self.avId (set in __init__), not
        # self.avatar — the previous code raised AttributeError here.
        return '%s-%s: %s' % (self.__class__.__name__, self.avId, self.getPriority())
class FleeFromAvatar(PetGoal):
    """Run away from a particular avatar."""

    def __init__(self, avatar):
        PetGoal.__init__(self)
        self.avatar = avatar

    def destroy(self):
        PetGoal.destroy(self)
        if hasattr(self, 'avatar'):
            del self.avatar

    def getPriority(self):
        # Fleeing from the owner is scaled differently.
        fleeing_owner = self.avatar.doId == self.goalMgr.pet.ownerId
        base = PetConstants.PriorityFleeFromAvatar
        return base * PetConstants.FleeFromOwnerScale if fleeing_owner else base

    def enterForeground(self):
        # NOTE(review): this calls brain._chase for a *flee* goal, exactly
        # like ChaseAvatar does — confirm a brain._flee wasn't intended.
        self.brain._chase(self.avatar)

    def __str__(self):
        return '%s-%s: %s' % (self.__class__.__name__, self.avatar.doId, self.getPriority())
class DoTrick(InteractWithAvatar):
    """One-shot goal: approach an avatar, perform a trick, then finish."""

    def __init__(self, avatar, trickId):
        InteractWithAvatar.__init__(self, avatar)
        self.trickId = trickId
        # One-shot: remove + destroy this goal when the trick completes.
        self.removeOnDone = 1

    def getPriority(self):
        return PetConstants.PriorityDoTrick

    def setGoalMgr(self, goalMgr):
        # Flag the manager before attaching so it knows a trick is pending.
        goalMgr._setHasTrickGoal(True)
        InteractWithAvatar.setGoalMgr(self, goalMgr)

    def clearGoalMgr(self):
        self.goalMgr._setHasTrickGoal(False)
        InteractWithAvatar.clearGoalMgr(self)

    def _chaseAvInInteractMode(self):
        # Don't resume chasing mid-trick.
        return False

    def startInteract(self):
        self.brain._doTrick(self.trickId, self.avatar)
        # Finish (announceDone) as soon as the action FSM reports the trick done.
        self.trickDoneEvent = self.pet.actionFSM.getTrickDoneEvent()
        self.accept(self.trickDoneEvent, self.announceDone)

    def stopInteract(self):
        self.ignore(self.trickDoneEvent)
        del self.trickDoneEvent

    def __str__(self):
        return '%s-%s-%s: %s' % (self.__class__.__name__,
         self.avatar.doId,
         self.trickId,
         self.getPriority())
|
|
"""
Support for MQTT message handling.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/mqtt/
"""
import asyncio
import logging
import os
import socket
import time
import ssl
import re
import requests.certs
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.setup import async_prepare_setup_platform
from homeassistant.config import load_yaml_config_file
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import template, config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect, dispatcher_send)
from homeassistant.util.async import (
run_coroutine_threadsafe, run_callback_threadsafe)
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP, CONF_VALUE_TEMPLATE, CONF_USERNAME,
CONF_PASSWORD, CONF_PORT, CONF_PROTOCOL, CONF_PAYLOAD)
from homeassistant.components.mqtt.server import HBMQTT_CONFIG_SCHEMA
# paho-mqtt is imported lazily inside MQTT.__init__ and the callbacks.
REQUIREMENTS = ['paho-mqtt==1.3.0']

_LOGGER = logging.getLogger(__name__)

DOMAIN = 'mqtt'

DATA_MQTT = 'mqtt'

SERVICE_PUBLISH = 'publish'
SIGNAL_MQTT_MESSAGE_RECEIVED = 'mqtt_message_received'

# Configuration keys.
CONF_EMBEDDED = 'embedded'
CONF_BROKER = 'broker'
CONF_CLIENT_ID = 'client_id'
CONF_DISCOVERY = 'discovery'
CONF_DISCOVERY_PREFIX = 'discovery_prefix'
CONF_KEEPALIVE = 'keepalive'
CONF_CERTIFICATE = 'certificate'
CONF_CLIENT_KEY = 'client_key'
CONF_CLIENT_CERT = 'client_cert'
CONF_TLS_INSECURE = 'tls_insecure'
CONF_TLS_VERSION = 'tls_version'
CONF_BIRTH_MESSAGE = 'birth_message'
CONF_WILL_MESSAGE = 'will_message'
CONF_STATE_TOPIC = 'state_topic'
CONF_COMMAND_TOPIC = 'command_topic'
CONF_QOS = 'qos'
CONF_RETAIN = 'retain'

# MQTT protocol revisions accepted in the config.
PROTOCOL_31 = '3.1'
PROTOCOL_311 = '3.1.1'

# Defaults applied by CONFIG_SCHEMA.
DEFAULT_PORT = 1883
DEFAULT_KEEPALIVE = 60
DEFAULT_QOS = 0
DEFAULT_RETAIN = False
DEFAULT_PROTOCOL = PROTOCOL_311
DEFAULT_DISCOVERY = False
DEFAULT_DISCOVERY_PREFIX = 'homeassistant'
DEFAULT_TLS_PROTOCOL = 'auto'

# Service-call attribute names.
ATTR_TOPIC = 'topic'
ATTR_PAYLOAD = 'payload'
ATTR_PAYLOAD_TEMPLATE = 'payload_template'
ATTR_QOS = CONF_QOS
ATTR_RETAIN = CONF_RETAIN

# Upper bound for the exponential reconnect backoff.
MAX_RECONNECT_WAIT = 300  # seconds
def valid_subscribe_topic(value, invalid_chars='\0'):
    """Validate that we can subscribe using this MQTT topic."""
    value = cv.string(value)
    if any(char in value for char in invalid_chars):
        raise vol.Invalid('Invalid MQTT topic name')
    # MQTT topic names are limited to 65535 bytes and must be non-empty.
    return vol.Length(min=1, max=65535)(value)
def valid_publish_topic(value):
    """Validate that we can publish using this MQTT topic.

    Publish topics additionally forbid the '#' and '+' wildcards.
    """
    return valid_subscribe_topic(value, invalid_chars='#+\0')
def valid_discovery_topic(value):
    """Validate a discovery topic.

    Discovery prefixes forbid wildcards and '/' (single level only).
    """
    return valid_subscribe_topic(value, invalid_chars='#+\0/')
# QoS must be 0, 1 or 2 per the MQTT specification.
_VALID_QOS_SCHEMA = vol.All(vol.Coerce(int), vol.In([0, 1, 2]))

CLIENT_KEY_AUTH_MSG = 'client_key and client_cert must both be present in ' \
                      'the MQTT broker configuration'

# Shared schema for the birth message and the last-will message.
MQTT_WILL_BIRTH_SCHEMA = vol.Schema({
    vol.Required(ATTR_TOPIC): valid_publish_topic,
    vol.Required(ATTR_PAYLOAD, CONF_PAYLOAD): cv.string,
    vol.Optional(ATTR_QOS, default=DEFAULT_QOS): _VALID_QOS_SCHEMA,
    vol.Optional(ATTR_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
}, required=True)

CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Optional(CONF_CLIENT_ID): cv.string,
        vol.Optional(CONF_KEEPALIVE, default=DEFAULT_KEEPALIVE):
            vol.All(vol.Coerce(int), vol.Range(min=15)),
        vol.Optional(CONF_BROKER): cv.string,
        vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
        vol.Optional(CONF_USERNAME): cv.string,
        vol.Optional(CONF_PASSWORD): cv.string,
        # 'auto' selects the requests CA bundle (resolved in async_setup).
        vol.Optional(CONF_CERTIFICATE): vol.Any('auto', cv.isfile),
        # Client key and cert are only valid together.
        vol.Inclusive(CONF_CLIENT_KEY, 'client_key_auth',
                      msg=CLIENT_KEY_AUTH_MSG): cv.isfile,
        vol.Inclusive(CONF_CLIENT_CERT, 'client_key_auth',
                      msg=CLIENT_KEY_AUTH_MSG): cv.isfile,
        vol.Optional(CONF_TLS_INSECURE): cv.boolean,
        vol.Optional(CONF_TLS_VERSION, default=DEFAULT_TLS_PROTOCOL):
            vol.Any('auto', '1.0', '1.1', '1.2'),
        vol.Optional(CONF_PROTOCOL, default=DEFAULT_PROTOCOL):
            vol.All(cv.string, vol.In([PROTOCOL_31, PROTOCOL_311])),
        vol.Optional(CONF_EMBEDDED): HBMQTT_CONFIG_SCHEMA,
        vol.Optional(CONF_WILL_MESSAGE): MQTT_WILL_BIRTH_SCHEMA,
        vol.Optional(CONF_BIRTH_MESSAGE): MQTT_WILL_BIRTH_SCHEMA,
        vol.Optional(CONF_DISCOVERY, default=DEFAULT_DISCOVERY): cv.boolean,
        vol.Optional(CONF_DISCOVERY_PREFIX,
                     default=DEFAULT_DISCOVERY_PREFIX): valid_discovery_topic,
    }),
}, extra=vol.ALLOW_EXTRA)

SCHEMA_BASE = {
    vol.Optional(CONF_QOS, default=DEFAULT_QOS): _VALID_QOS_SCHEMA,
}

MQTT_BASE_PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend(SCHEMA_BASE)

# Sensor type platforms subscribe to MQTT events
MQTT_RO_PLATFORM_SCHEMA = MQTT_BASE_PLATFORM_SCHEMA.extend({
    vol.Required(CONF_STATE_TOPIC): valid_subscribe_topic,
    vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
})

# Switch type platforms publish to MQTT and may subscribe
MQTT_RW_PLATFORM_SCHEMA = MQTT_BASE_PLATFORM_SCHEMA.extend({
    vol.Required(CONF_COMMAND_TOPIC): valid_publish_topic,
    vol.Optional(CONF_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
    vol.Optional(CONF_STATE_TOPIC): valid_subscribe_topic,
    vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
})

# Service call validation schema
MQTT_PUBLISH_SCHEMA = vol.Schema({
    vol.Required(ATTR_TOPIC): valid_publish_topic,
    # A raw payload and a payload template are mutually exclusive.
    vol.Exclusive(ATTR_PAYLOAD, CONF_PAYLOAD): object,
    vol.Exclusive(ATTR_PAYLOAD_TEMPLATE, CONF_PAYLOAD): cv.string,
    vol.Optional(ATTR_QOS, default=DEFAULT_QOS): _VALID_QOS_SCHEMA,
    vol.Optional(ATTR_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
}, required=True)
def _build_publish_data(topic, qos, retain):
    """Build the arguments for the publish service without the payload."""
    data = {ATTR_TOPIC: topic}
    # Only include qos/retain when the caller actually supplied them.
    for key, val in ((ATTR_QOS, qos), (ATTR_RETAIN, retain)):
        if val is not None:
            data[key] = val
    return data
def publish(hass, topic, payload, qos=None, retain=None):
    """Publish message to an MQTT topic.

    Thread-safe: schedules async_publish on the event loop.
    """
    hass.add_job(async_publish, hass, topic, payload, qos, retain)
@callback
def async_publish(hass, topic, payload, qos=None, retain=None):
    """Publish a message to an MQTT topic (must run in the event loop)."""
    service_data = _build_publish_data(topic, qos, retain)
    service_data[ATTR_PAYLOAD] = payload
    hass.async_add_job(
        hass.services.async_call(DOMAIN, SERVICE_PUBLISH, service_data))
def publish_template(hass, topic, payload_template, qos=None, retain=None):
    """Publish a templated payload to an MQTT topic (blocking call)."""
    service_data = _build_publish_data(topic, qos, retain)
    service_data[ATTR_PAYLOAD_TEMPLATE] = payload_template
    hass.services.call(DOMAIN, SERVICE_PUBLISH, service_data)
@asyncio.coroutine
def async_subscribe(hass, topic, msg_callback, qos=DEFAULT_QOS,
                    encoding='utf-8'):
    """Subscribe to an MQTT topic.

    Returns a callable that removes the subscription again.
    This method is a coroutine.
    """
    @callback
    def async_mqtt_topic_subscriber(dp_topic, dp_payload, dp_qos):
        """Match subscribed MQTT topic."""
        # Every received message is dispatched to every subscriber; drop
        # messages whose topic does not match this subscription.
        if not _match_topic(topic, dp_topic):
            return
        if encoding is not None:
            try:
                payload = dp_payload.decode(encoding)
                _LOGGER.debug("Received message on %s: %s", dp_topic, payload)
            except (AttributeError, UnicodeDecodeError):
                # Undecodable payloads are logged and dropped.
                _LOGGER.error("Illegal payload encoding %s from "
                              "MQTT topic: %s, Payload: %s",
                              encoding, dp_topic, dp_payload)
                return
        else:
            # encoding=None means the caller wants the raw bytes.
            _LOGGER.debug("Received binary message on %s", dp_topic)
            payload = dp_payload
        hass.async_run_job(msg_callback, dp_topic, payload, dp_qos)

    # Connect to the dispatcher first, then subscribe on the broker so no
    # message can arrive without a listener in place.
    async_remove = async_dispatcher_connect(
        hass, SIGNAL_MQTT_MESSAGE_RECEIVED, async_mqtt_topic_subscriber)
    yield from hass.data[DATA_MQTT].async_subscribe(topic, qos)
    return async_remove
def subscribe(hass, topic, msg_callback, qos=DEFAULT_QOS,
              encoding='utf-8'):
    """Subscribe to an MQTT topic.

    Thread-safe synchronous wrapper around async_subscribe(); returns a
    callable that removes the subscription.
    """
    async_remove = run_coroutine_threadsafe(
        async_subscribe(hass, topic, msg_callback, qos, encoding), hass.loop
    ).result()

    def remove():
        """Remove the listener (thread-safe)."""
        run_callback_threadsafe(hass.loop, async_remove).result()

    return remove
@asyncio.coroutine
def _async_setup_server(hass, config):
    """Try to start the embedded MQTT broker.

    Returns the broker configuration on success, a falsy value otherwise.
    This method is a coroutine.
    """
    domain_config = config.get(DOMAIN, {})
    server = yield from async_prepare_setup_platform(
        hass, config, DOMAIN, 'server')
    if server is None:
        _LOGGER.error("Unable to load embedded server")
        return None
    success, broker_config = yield from server.async_start(
        hass, domain_config.get(CONF_EMBEDDED))
    return success and broker_config
@asyncio.coroutine
def _async_setup_discovery(hass, config):
    """Try to start the discovery of MQTT devices.

    This method is a coroutine.
    """
    domain_config = config.get(DOMAIN, {})
    discovery = yield from async_prepare_setup_platform(
        hass, config, DOMAIN, 'discovery')
    if discovery is None:
        _LOGGER.error("Unable to load MQTT discovery")
        return None
    return (yield from discovery.async_start(
        hass, domain_config[CONF_DISCOVERY_PREFIX], config))
@asyncio.coroutine
def async_setup(hass, config):
    """Start the MQTT protocol service."""
    conf = config.get(DOMAIN)

    if conf is None:
        # Component set up without any config: fall back to schema defaults.
        conf = CONFIG_SCHEMA({DOMAIN: {}})[DOMAIN]

    client_id = conf.get(CONF_CLIENT_ID)
    keepalive = conf.get(CONF_KEEPALIVE)

    # Only setup if embedded config passed in or no broker specified
    if CONF_EMBEDDED not in conf and CONF_BROKER in conf:
        broker_config = None
    else:
        broker_config = yield from _async_setup_server(hass, config)

    if CONF_BROKER in conf:
        # Explicit broker configuration wins over the embedded server.
        broker = conf[CONF_BROKER]
        port = conf[CONF_PORT]
        username = conf.get(CONF_USERNAME)
        password = conf.get(CONF_PASSWORD)
        certificate = conf.get(CONF_CERTIFICATE)
        client_key = conf.get(CONF_CLIENT_KEY)
        client_cert = conf.get(CONF_CLIENT_CERT)
        tls_insecure = conf.get(CONF_TLS_INSECURE)
        protocol = conf[CONF_PROTOCOL]
    elif broker_config:
        # If no broker passed in, auto config to internal server
        broker, port, username, password, certificate, protocol = broker_config
        # Embedded broker doesn't have some ssl variables
        client_key, client_cert, tls_insecure = None, None, None
    else:
        err = "Unable to start MQTT broker."
        if conf.get(CONF_EMBEDDED) is not None:
            # Explicit embedded config, requires explicit broker config
            err += " (Broker configuration required.)"
        _LOGGER.error(err)
        return False

    # For cloudmqtt.com, secured connection, auto fill in certificate
    if certificate is None and 19999 < port < 30000 and \
            broker.endswith('.cloudmqtt.com'):
        certificate = os.path.join(os.path.dirname(__file__),
                                   'addtrustexternalcaroot.crt')

    # When the certificate is set to auto, use bundled certs from requests
    if certificate == 'auto':
        certificate = requests.certs.where()

    will_message = conf.get(CONF_WILL_MESSAGE)
    birth_message = conf.get(CONF_BIRTH_MESSAGE)

    # Be able to override versions other than TLSv1.0 under Python3.6
    conf_tls_version = conf.get(CONF_TLS_VERSION)
    if conf_tls_version == '1.2':
        tls_version = ssl.PROTOCOL_TLSv1_2
    elif conf_tls_version == '1.1':
        tls_version = ssl.PROTOCOL_TLSv1_1
    elif conf_tls_version == '1.0':
        tls_version = ssl.PROTOCOL_TLSv1
    else:
        import sys
        # Python3.6 supports automatic negotiation of highest TLS version
        if sys.hexversion >= 0x03060000:
            tls_version = ssl.PROTOCOL_TLS  # pylint: disable=no-member
        else:
            tls_version = ssl.PROTOCOL_TLSv1

    try:
        hass.data[DATA_MQTT] = MQTT(
            hass, broker, port, client_id, keepalive, username, password,
            certificate, client_key, client_cert, tls_insecure, protocol,
            will_message, birth_message, tls_version)
    except socket.error:
        _LOGGER.exception("Can't connect to the broker. "
                          "Please check your settings and the broker itself")
        return False

    @asyncio.coroutine
    def async_stop_mqtt(event):
        """Stop MQTT component."""
        yield from hass.data[DATA_MQTT].async_disconnect()

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop_mqtt)

    success = yield from hass.data[DATA_MQTT].async_connect()

    if not success:
        return False

    @asyncio.coroutine
    def async_publish_service(call):
        """Handle MQTT publish service calls."""
        msg_topic = call.data[ATTR_TOPIC]
        payload = call.data.get(ATTR_PAYLOAD)
        payload_template = call.data.get(ATTR_PAYLOAD_TEMPLATE)
        qos = call.data[ATTR_QOS]
        retain = call.data[ATTR_RETAIN]
        if payload_template is not None:
            # Render the template; on failure log and drop the call.
            try:
                payload = \
                    template.Template(payload_template, hass).async_render()
            except template.jinja2.TemplateError as exc:
                _LOGGER.error(
                    "Unable to publish to '%s': rendering payload template of "
                    "'%s' failed because %s",
                    msg_topic, payload_template, exc)
                return
        yield from hass.data[DATA_MQTT].async_publish(
            msg_topic, payload, qos, retain)

    # Load service descriptions off the event loop (blocking file I/O).
    descriptions = yield from hass.async_add_job(
        load_yaml_config_file, os.path.join(
            os.path.dirname(__file__), 'services.yaml'))

    hass.services.async_register(
        DOMAIN, SERVICE_PUBLISH, async_publish_service,
        descriptions.get(SERVICE_PUBLISH), schema=MQTT_PUBLISH_SCHEMA)

    if conf.get(CONF_DISCOVERY):
        yield from _async_setup_discovery(hass, config)

    return True
class MQTT(object):
    """Home Assistant MQTT client (wrapper around paho-mqtt)."""

    def __init__(self, hass, broker, port, client_id, keepalive, username,
                 password, certificate, client_key, client_cert,
                 tls_insecure, protocol, will_message, birth_message,
                 tls_version):
        """Initialize Home Assistant MQTT client."""
        import paho.mqtt.client as mqtt

        self.hass = hass
        self.broker = broker
        self.port = port
        self.keepalive = keepalive
        # topic -> granted qos; value is None while the subscribe is
        # in flight (not yet acked by the broker).
        self.topics = {}
        # paho message id -> topic, for in-flight (un)subscribe requests.
        self.progress = {}
        self.birth_message = birth_message
        self._mqttc = None
        # Serializes calls into the paho client from the event loop.
        self._paho_lock = asyncio.Lock(loop=hass.loop)

        if protocol == PROTOCOL_31:
            proto = mqtt.MQTTv31
        else:
            proto = mqtt.MQTTv311

        if client_id is None:
            # Let paho generate a random client id.
            self._mqttc = mqtt.Client(protocol=proto)
        else:
            self._mqttc = mqtt.Client(client_id, protocol=proto)

        if username is not None:
            self._mqttc.username_pw_set(username, password)

        if certificate is not None:
            self._mqttc.tls_set(
                certificate, certfile=client_cert,
                keyfile=client_key, tls_version=tls_version)

        if tls_insecure is not None:
            self._mqttc.tls_insecure_set(tls_insecure)

        self._mqttc.on_subscribe = self._mqtt_on_subscribe
        self._mqttc.on_unsubscribe = self._mqtt_on_unsubscribe
        self._mqttc.on_connect = self._mqtt_on_connect
        self._mqttc.on_disconnect = self._mqtt_on_disconnect
        self._mqttc.on_message = self._mqtt_on_message

        if will_message:
            self._mqttc.will_set(will_message.get(ATTR_TOPIC),
                                 will_message.get(ATTR_PAYLOAD),
                                 will_message.get(ATTR_QOS),
                                 will_message.get(ATTR_RETAIN))

    @asyncio.coroutine
    def async_publish(self, topic, payload, qos, retain):
        """Publish a MQTT message.

        This method must be run in the event loop and returns a coroutine.
        """
        with (yield from self._paho_lock):
            # paho's publish blocks, so run it in the executor.
            yield from self.hass.async_add_job(
                self._mqttc.publish, topic, payload, qos, retain)

    @asyncio.coroutine
    def async_connect(self):
        """Connect to the host. Does not process messages yet.

        This method is a coroutine.
        """
        result = yield from self.hass.async_add_job(
            self._mqttc.connect, self.broker, self.port, self.keepalive)

        if result != 0:
            import paho.mqtt.client as mqtt
            _LOGGER.error('Failed to connect: %s', mqtt.error_string(result))
        else:
            # Start paho's background network thread.
            self._mqttc.loop_start()

        # paho returns 0 on success.
        return not result

    def async_disconnect(self):
        """Stop the MQTT client.

        This method must be run in the event loop and returns a coroutine.
        """
        def stop():
            """Stop the MQTT client."""
            self._mqttc.disconnect()
            self._mqttc.loop_stop()

        return self.hass.async_add_job(stop)

    @asyncio.coroutine
    def async_subscribe(self, topic, qos):
        """Subscribe to a topic.

        This method is a coroutine.
        """
        if not isinstance(topic, str):
            raise HomeAssistantError("topic need to be a string!")

        with (yield from self._paho_lock):
            # Already subscribed (or subscribe in flight): nothing to do.
            if topic in self.topics:
                return
            result, mid = yield from self.hass.async_add_job(
                self._mqttc.subscribe, topic, qos)
            _raise_on_error(result)
            self.progress[mid] = topic
            # None marks the subscription as pending until the broker acks.
            self.topics[topic] = None

    @asyncio.coroutine
    def async_unsubscribe(self, topic):
        """Unsubscribe from topic.

        This method is a coroutine.
        """
        result, mid = yield from self.hass.async_add_job(
            self._mqttc.unsubscribe, topic)
        _raise_on_error(result)
        self.progress[mid] = topic

    def _mqtt_on_connect(self, _mqttc, _userdata, _flags, result_code):
        """On connect callback.

        Resubscribe to all topics we were subscribed to and publish birth
        message.
        """
        import paho.mqtt.client as mqtt

        if result_code != mqtt.CONNACK_ACCEPTED:
            _LOGGER.error('Unable to connect to the MQTT broker: %s',
                          mqtt.connack_string(result_code))
            self._mqttc.disconnect()
            return

        # Keep only pending (not yet acked) entries; re-issue a subscribe
        # for every previously acked topic.
        old_topics = self.topics
        self.topics = {key: value for key, value in self.topics.items()
                       if value is None}
        for topic, qos in old_topics.items():
            # qos is None if we were in process of subscribing
            if qos is not None:
                self.hass.add_job(self.async_subscribe, topic, qos)

        if self.birth_message:
            self.hass.add_job(self.async_publish(
                self.birth_message.get(ATTR_TOPIC),
                self.birth_message.get(ATTR_PAYLOAD),
                self.birth_message.get(ATTR_QOS),
                self.birth_message.get(ATTR_RETAIN)))

    def _mqtt_on_subscribe(self, _mqttc, _userdata, mid, granted_qos):
        """Subscribe successful callback."""
        topic = self.progress.pop(mid, None)
        if topic is None:
            return
        # Record the qos the broker actually granted.
        self.topics[topic] = granted_qos[0]

    def _mqtt_on_message(self, _mqttc, _userdata, msg):
        """Message received callback."""
        # Runs in paho's network thread; dispatcher_send is thread-safe here.
        dispatcher_send(
            self.hass, SIGNAL_MQTT_MESSAGE_RECEIVED, msg.topic, msg.payload,
            msg.qos
        )

    def _mqtt_on_unsubscribe(self, _mqttc, _userdata, mid, granted_qos):
        """Unsubscribe successful callback."""
        # NOTE(review): paho-mqtt documents on_unsubscribe as
        # (client, userdata, mid) — confirm the extra granted_qos parameter
        # against the pinned paho version.
        topic = self.progress.pop(mid, None)
        if topic is None:
            return
        self.topics.pop(topic, None)

    def _mqtt_on_disconnect(self, _mqttc, _userdata, result_code):
        """Disconnected callback."""
        self.progress = {}
        self.topics = {key: value for key, value in self.topics.items()
                       if value is not None}
        # Remove None values from topic list
        # NOTE(review): redundant — the comprehension above already dropped
        # every None value.
        for key in list(self.topics):
            if self.topics[key] is None:
                self.topics.pop(key)
        # When disconnected because of calling disconnect()
        if result_code == 0:
            return

        # Exponential backoff reconnect loop, capped at MAX_RECONNECT_WAIT.
        tries = 0
        wait_time = 0
        while True:
            try:
                if self._mqttc.reconnect() == 0:
                    _LOGGER.info("Successfully reconnected to the MQTT server")
                    break
            except socket.error:
                pass
            wait_time = min(2**tries, MAX_RECONNECT_WAIT)
            _LOGGER.warning(
                "Disconnected from MQTT (%s). Trying to reconnect in %s s",
                result_code, wait_time)
            # It is ok to sleep here as we are in the MQTT thread.
            time.sleep(wait_time)
            tries += 1
def _raise_on_error(result):
"""Raise error if error result."""
if result != 0:
import paho.mqtt.client as mqtt
raise HomeAssistantError(
'Error talking to MQTT: {}'.format(mqtt.error_string(result)))
def _match_topic(subscription, topic):
"""Test if topic matches subscription."""
reg_ex_parts = []
suffix = ""
if subscription.endswith('#'):
subscription = subscription[:-2]
suffix = "(.*)"
sub_parts = subscription.split('/')
for sub_part in sub_parts:
if sub_part == "+":
reg_ex_parts.append(r"([^\/]+)")
else:
reg_ex_parts.append(re.escape(sub_part))
reg_ex = "^" + (r'\/'.join(reg_ex_parts)) + suffix + "$"
reg = re.compile(reg_ex)
return reg.match(topic) is not None
|
|
import numpy as np
import pdb,os
from scipy.optimize import fmin_slsqp,least_squares
import astropy.io.fits as pyfits
import scipy.interpolate as interp
import utilities.transformations as tr
def ampMeritFunction(voltages,distortion,ifuncs):
    """Simple merit function calculator.

    voltages is 1D array of weights for the influence functions
    distortion is 1D (flattened) distortion map
    ifuncs is 2D array (Npixels x Ncells) of flattened influence functions

    Returns mean((ifuncs.dot(voltages) - distortion)**2).
    """
    # FIX: compute the residual once — the previous version evaluated the
    # matrix product twice and discarded the first result.
    resid = np.dot(ifuncs,voltages)-distortion
    return np.mean(resid**2)
def ampMeritFunction2(voltages,**kwargs):
    """Merit function in the keyword-argument calling convention.

    Expects kwargs['inp'] == [distortion, ifuncs] and returns the tuple
    (mean squared residual, [], 0) required by the external optimizer.
    """
    distortion, ifuncs = kwargs['inp'][0], kwargs['inp'][1]
    residual = np.dot(ifuncs,voltages) - distortion
    return np.mean(residual**2), [], 0
def ampMeritDerivative(voltages,distortion,ifuncs):
    """Gradient of the mean-squared merit function w.r.t. voltages.

    d/dv mean((ifuncs.v - distortion)**2) = 2*resid . ifuncs / N
    """
    residual = np.dot(ifuncs,voltages) - distortion
    return np.dot(2*residual, ifuncs) / np.size(distortion)
def ampMeritDerivative2(voltages,f,g,**kwargs):
    """Gradient of the merit function, keyword-argument convention.

    f and g are accepted for the external optimizer's signature but unused.
    Returns (gradient as a list, [], 0).
    """
    distortion, ifuncs = kwargs['inp'][0], kwargs['inp'][1]
    residual = np.dot(ifuncs,voltages) - distortion
    grad = np.dot(2*residual, ifuncs) / np.size(distortion)
    return grad.tolist(), [], 0
def rawOptimizer(ifs,dist,bounds=None,smin=0.,smax=5.):
    """Assumes ifs and dist are both in slope or amplitude space.
    No conversion to slope will occur.

    ifs: 3D array of influence functions (cell, y, x)
    dist: 2D distortion map
    bounds: per-cell (min, max) voltage bounds; defaults to (smin, smax)

    Returns (sol, optv): the reconstructed 2D solution and the optimal
    voltage vector found by SLSQP.
    """
    #Create bounds list
    if bounds is None:
        bounds = []
        for i in range(np.shape(ifs)[0]):
            bounds.append((smin,smax))
    #Get ifs in right format
    ifs = ifs.transpose(1,2,0) #Last index is cell number
    #Reshape ifs and distortion into (Npixels x Ncells) / (Npixels,)
    sh = np.shape(ifs)
    ifsFlat = ifs.reshape(sh[0]*sh[1],sh[2])
    distFlat = dist.flatten()
    #Call optimizer algorithm (bounded SLSQP with analytic gradient)
    optv = fmin_slsqp(ampMeritFunction,np.zeros(sh[2]),\
                      bounds=bounds,args=(distFlat,ifsFlat),\
                      iprint=2,fprime=ampMeritDerivative,iter=200,\
                      acc=1.e-10)
    #Reconstruct solution in the original 2D space
    sol = np.dot(ifs,optv)
    return sol,optv
def prepareIFs(ifs,dx=None,azweight=.015):
    """
    Flatten influence functions into the (Npixels, Ncells) matrix the
    optimizer expects.

    When dx is given, axial/azimuthal slope maps (converted to arcsec)
    are used instead of amplitudes, with the azimuthal component scaled
    down by azweight. Each map is mean-subtracted before flattening.
    NOTE: the dx=None branch demeans the input array in place.
    """
    if dx is not None:
        # First gradient element is the axial derivative, second azimuthal
        ifs = np.array(np.gradient(ifs,*dx,axis=(1,2)))*180/np.pi*60.**2 / 1000.
        ifs[1] = ifs[1]*azweight
        ifs = ifs.transpose(1,0,2,3)
        sha = np.shape(ifs)
        # Remove the mean of every (cell, derivative-axis) slope map
        for cell in range(sha[0]):
            for comp in range(sha[1]):
                ifs[cell,comp] = ifs[cell,comp] - np.nanmean(ifs[cell,comp])
        ifs = ifs.reshape((sha[0],sha[1]*sha[2]*sha[3]))
    else:
        #ifs = ifs.transpose(1,2,0)
        sha = np.shape(ifs)
        # Remove each cell's mean amplitude (in place)
        for cell in range(sha[0]):
            ifs[cell] = ifs[cell] - np.nanmean(ifs[cell])
        ifs = ifs.reshape((sha[0],sha[1]*sha[2]))
    return np.transpose(ifs)
def prepareDist(d,dx=None,azweight=.015):
    """
    Flatten a distortion array (or shademask) for the optimizer.

    When dx is given, the array is first converted to mean-subtracted
    axial/azimuthal slopes in arcsec, with the azimuthal component
    down-weighted by azweight. With dx=None the array is flattened as-is.
    """
    if dx is not None:
        # First gradient element is the axial derivative, second azimuthal
        d = np.array(np.gradient(d,*dx))*180/np.pi*60.**2 / 1000.
        for comp in (0, 1):
            d[comp] = d[comp] - np.nanmean(d[comp])
        d[1] = d[1]*azweight
    return d.flatten()
def optimizer(distortion,ifs,shade,smin=0.,smax=5.,bounds=None,compare=False):
    """
    Cleaner implementation of optimizer. ifs and distortion should
    already be in whatever form (amplitude or slope) desired.
    IFs should have had prepareIFs already run on them.
    Units should be identical between the two.

    distortion, ifs and shade may be arrays or FITS filenames.
    Returns the optimal voltage vector, or (ifs, distortion) when
    compare is True (arrays for external comparison, e.g. MATLAB).
    """
    #Load in data (accept FITS file paths as well as arrays)
    if isinstance(distortion, str):
        distortion = pyfits.getdata(distortion)
    if isinstance(ifs, str):
        ifs = pyfits.getdata(ifs)
    if isinstance(shade, str):
        shade = pyfits.getdata(shade)
    #Remove shademask
    ifs = ifs[shade==1]
    distortion = distortion[shade==1]
    #Remove nans
    ind = ~np.isnan(distortion)
    ifs = ifs[ind]
    distortion = distortion[ind]
    # Truthiness test instead of `is True`, so numpy bools also work
    if compare:
        #Output arrays as fits to be compared using MATLAB
        return ifs,distortion
    #Default bounds: one (smin, smax) pair per actuator
    if bounds is None:
        bounds = [(smin, smax) for _ in range(np.shape(ifs)[1])]
    #Call optimizer algorithm
    optv = fmin_slsqp(ampMeritFunction, np.zeros(np.shape(ifs)[1]),
                      bounds=bounds, args=(distortion, ifs),
                      iprint=1, fprime=ampMeritDerivative, iter=200,
                      acc=1.e-6)
    return optv
def correctDistortion(dist,ifs,shade,dx=None,azweight=.015,smax=5.,\
                      bounds=None,compare=False):
    """
    Wrapper function to apply and evaluate a correction
    on distortion data.
    Distortion and IFs are assumed to already be on the
    same grid size.
    dx should be in mm, dist and ifs should be in microns
    Returns the optimizer result (voltage vector, or arrays when
    compare is True), or None when the input shapes disagree.
    """
    #Make sure shapes are correct
    if not (np.shape(dist)==np.shape(ifs[0])==np.shape(shade)):
        print 'Unequal shapes!'
        return None
    #Prepare arrays (slope conversion + azimuthal weighting if dx given)
    distp = prepareDist(dist,dx=dx,azweight=azweight)
    ifsp = prepareIFs(ifs,dx=dx,azweight=azweight)
    # Shademask is only flattened (no derivative, no weighting)
    shadep = prepareDist(shade)
    #Run optimizer on the negated distortion so the solution cancels it
    res = optimizer(-distp,ifsp,shadep,smax=smax,bounds=bounds,compare=compare)
    return res
def convertFEAInfluence(filename,Nx,Ny,method='cubic',\
                        cylcoords=True):
    """Read in Vanessa's CSV file for AXRO mirror
    Mirror no longer assumed to be cylinder.
    Need to regrid initial and perturbed nodes onto regular grid,
    then compute radial difference.
    Returns (influence, undeformed_grid, deformed_grid), each with the
    one-node guard border trimmed off.
    """
    #Load FEA data (columns transposed so each field is a row)
    d = np.transpose(np.genfromtxt(filename,skip_header=1,delimiter=','))
    if cylcoords is True:
        # Columns are (.., r, theta, z, dr, dtheta, dz); meters -> mm
        r0 = d[1]*1e3
        rm = np.mean(r0)
        t0 = d[2]*np.pi/180. * rm #Convert to arc length in mm
        z0 = d[3]*1e3
        #r0 = np.repeat(220.497,len(t0))
        r = r0 + d[4]*1e3
        t = (d[2] + d[5])*np.pi/180. * rm #Convert to arc length in mm
        z = z0 + d[6]*1e3
    else:
        # Cartesian input: columns (.., x, y, z, dx, dy, dz); meters -> mm
        x0 = d[2]*1e3
        y0 = d[3]*1e3
        z0 = d[4]*1e3
        x = x0 + d[5]*1e3
        y = y0 + d[6]*1e3
        z = z0 + d[7]*1e3
        #Convert to cylindrical
        # NOTE(review): 220.497 looks like a hard-coded mirror radius (mm)
        t0 = np.arctan2(x0,-z0)*220.497 #Convert to arc length in mm
        r0 = np.sqrt(x0**2+z0**2)
        z0 = y0
        t = np.arctan2(x,-z)*220.497
        r = np.sqrt(x**2+z**2)
        z = y
    #Construct regular grid (two extra samples act as a guard border)
    gy = np.linspace(z0.min(),z0.max(),Nx+2)
    gx = np.linspace(t0.min(),t0.max(),Ny+2)
    gx,gy = np.meshgrid(gx,gy)
    #Run interpolation; NaNs (outside convex hull) are zero-filled
    g0 = interp.griddata((z0,t0),r0,(gy,gx),method=method)
    g0[np.isnan(g0)] = 0.
    g = interp.griddata((z,t),r,(gy,gx),method=method)
    g[np.isnan(g)] = 0.
    print filename + ' done'
    return -(g0[1:-1,1:-1]-g[1:-1,1:-1]),g0[1:-1,1:-1],g[1:-1,1:-1]
def createShadePerimeter(sh,axialFraction=0.,azFraction=0.):
    """
    Create a shademask where a fraction of the axial and
    azimuthal perimeter is blocked.
    Fraction is the fraction of blockage in each axis.
    sh is shape tuple e.g. (200,200)

    Returns an array of shape sh with 1. in the open interior and 0.
    on the blocked border.
    """
    arr = np.zeros(sh)
    axIndex = int(round(sh[0]*axialFraction/2))
    azIndex = int(round(sh[1]*azFraction/2))
    # BUG FIX: use explicit stop indices. The old arr[axIndex:-axIndex]
    # produced an EMPTY slice when a fraction was 0 (0:-0 == 0:0), so the
    # default arguments returned an all-zero mask instead of all-open.
    arr[axIndex:sh[0]-axIndex, azIndex:sh[1]-azIndex] = 1.
    return arr
# ---- boundary between concatenated source files ----
""" This contains various helper functions for the Digital Slide Archive"""
import re, csv, os, sys, optparse
import collections
from PIL import Image
import openslide
from openslide.lowlevel import OpenSlideError
import hashlib
import subprocess
import shutil,glob
import random
from functools import partial
def md5sum(filename):
    """Return the hex MD5 digest of *filename*, read in 128-byte chunks."""
    digest = hashlib.md5()
    with open(filename, mode='rb') as handle:
        while True:
            chunk = handle.read(128)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
"""Default Directories """
DEFAULT_WSI_DIR = '/NDPI_VAULT/ADRC/'
DEFAULT_PYRAMID_DIR = '/bigdata3/PYRAMIDS/ADRC/'
DEFAULT_DATABASE = 'adrc_slide_database'
DEFAULT_IIP_SERVER_ADDRESS = "http://node15.cci.emory.edu/cgi-bin/iipsrv.fcgi?Zoomify=";
"""CDSA SPECIFIC VARIABLES AND PATHS """
tcga_tumor_types = [ 'acc','blca','blnp','blp','brca','cesc','cntl','coad','dlbc','esca','gbm','hnsc','kich','kirc','kirp','laml','lcll','lcml','lgg','lihc','luad',\
'lusc','meso','ov','paad','pcpg','prad','read','sarc','skcm','stad','tgct','thca','ucec','ucs','uvm']
PATH_REPORT_ROOT_DIRS = ['/bcr/intgen.org/pathology_reports/reports/','/bcr/nationwidechildrens.org/pathology_reports/reports/']
CLIN_REPORT_ROOT = '/bcr/biotab/clin/'
CLIN_REPORT_ROOT_DIRS = ['/bcr/biotab/clin/']
dl_dir = "/SYNOLOGY_TCGA_MIRROR/TCGA_LOCAL_MIRROR/"
TCGA_LOCAL_ROOT_DIR = dl_dir + 'tcga-data.nci.nih.gov/tcgafiles/ftp_auth/distro_ftpusers/anonymous/tumor/'
TCGA_HTTP_ROOT_URL = 'https://tcga-data.nci.nih.gov/tcgafiles/ftp_auth/distro_ftpusers/anonymous/tumor/'
"""PARAMETERS AND VARIABLE INITIALIZATION """
verbose = 0
default_level = ',0 ' ### default layer to use for ndpi2tiff
ndpi_count = 0
_verbose = 0
_verbose = 1
script_id_num = 3800 ### going to increment from some number...maybe ill make this random later
class LinePrinter():
    """
    Overwrite the current stdout line with the given data: emits a
    carriage return plus ANSI erase-to-end-of-line, then flushes, so
    repeated calls update a single progress line in place.
    """
    def __init__(self,data):
        text = "\r\x1b[K" + data.__str__()
        stream = sys.stdout
        stream.write(text)
        stream.flush()
"""
REGULAR EXPRESSION
"""
parse_tcga_tissue_and_stain_type = re.compile(r'org_(..*)\.(diagnostic|tissue)_images',re.IGNORECASE)
parse_TCGA_SUBJECT_ID = re.compile(r'(TCGA-..-....)')
parse_full_TCGA_ID = re.compile(r'(TCGA-..-....)-(\d\d)(.)-([^-]*)',re.IGNORECASE)
adrc_pat_one = re.compile(r'(ADRC\d\d-\d+)_(...?)_(.*)\.ndpi$', re.IGNORECASE)
adrc_pat_two = re.compile(r'(OS\d\d-\d+)_(\d+)_(.+)_(.*)\.ndpi$|(OS\d\d-\d+)_([^_]*)_(.*)\.ndpi$',re.IGNORECASE)
adrc_pat_three = re.compile(r'(E\d\d-\d+)_(\d+)_([^_]+)_(.*)\.ndpi$',re.IGNORECASE)
adrc_dzi_pat_one = re.compile(r'(ADRC\d\d-\d+)_(...?)_(.+)\.ndpi\.dzi\.tif$', re.IGNORECASE)
adrc_dzi_pat_two = re.compile(r'(OS\d\d-\d+)_(\d+)_(.+)_(.*)\.ndpi\.dzi\.tif$|(OS\d\d-\d+)_([^_]*)_(.*)\.ndpi\.dzi\.tif',re.IGNORECASE)
adrc_dzi_pat_three = re.compile(r'(E\d\d-\d+)_(\d?)_(.+)_(.*)\.ndpi\.dzi\.tif$',re.IGNORECASE)
"""
Output files and other logs
"""
f_out = open('corrupt_svs_files.txt','a+')
def connect_to_db( host, user, passwd, db):
    """Open a MySQL connection and return two DictCursor handles.

    Returns (db_dict_cursor, update_cursor) sharing one connection —
    one intended for reads, one for updates. On any failure the process
    exits. NOTE(review): MySQLdb is not imported in this chunk; confirm
    it is imported at the top of the file.
    """
    try:
        db_dict = MySQLdb.connect(host, user, passwd, db, cursorclass=MySQLdb.cursors.DictCursor )
        db_dict_cursor = db_dict.cursor()
        update_cursor = db_dict.cursor()
        return( db_dict_cursor, update_cursor)
    except:
        # NOTE(review): bare except hides the real error class, and
        # sys.exit() makes the return below unreachable.
        print "Could not connect to the database!!!",host,user,passwd,db
        sys.exit()
        return (None,None)
def openslide_test_file(full_file_path,file_type,db_cursor):
    """Probe an image with OpenSlide and collect basic metadata.

    Returns a 7-tuple (valid, width, height, filesize, orig_resolution,
    slide_name, md5). On failure the first element is False and the rest
    are None. md5 is currently always None — the checksum call is
    commented out. db_cursor is accepted but unused (the insert is
    commented out).
    """
    width=height=filesize=orig_resolution=slide_title=md5 = None
    try:
        im = openslide.open_slide(full_file_path)
        (width, height) = im.dimensions
        base_file_name = os.path.basename(full_file_path)
        filesize = os.path.getsize(full_file_path)
        if(file_type== 'svs'):
            # Aperio slides record their scan magnification here
            orig_resolution = im.properties['aperio.AppMag']
        #md5 = md5Checksum(full_file_path)
        slide_name = os.path.basename(full_file_path)
        return(True,width,height,filesize,orig_resolution,slide_name,md5)
    except OpenSlideError, e:
        print "Openslide returned an error",full_file_path
        print >>sys.stderr, "Verify failed with:", repr(e.args)
        print "Openslide returned an error",full_file_path
        f_out.write(full_file_path+';\n')
        # NOTE(review): the insert statement is only printed; the execute
        # call below is commented out, so nothing reaches the database.
        insert_corrupt_batch_stmt = "insert into `corrupt_or_unreadable_%s_files` (full_file_name,filesize) Values ('%s',%d) "
        print insert_corrupt_batch_stmt % (file_type,full_file_path,os.path.getsize(full_file_path) )
        #update_cursor.execute( insert_corrupt_batch_stmt % (full_file_path,os.path.getsize(full_file_path) ))
        return(False,None,None,None,None,None,None)
    except StandardError, e:
        #file name likely not valid
        print >>sys.stderr, "Verify failed with:", repr(e.args)
        print "Openslide returned an error",full_file_path
        f_out.write(full_file_path+';\n')
        insert_corrupt_batch_stmt = "insert into `corrupt_or_unreadable_%s_files` (full_file_name,filesize) Values ('%s',%d) "
        print insert_corrupt_batch_stmt % (file_type,full_file_path,os.path.getsize(full_file_path) )
        #update_cursor.execute( insert_corrupt_batch_stmt % (full_file_path,os.path.getsize(full_file_path) ))
        return(False,None,None,None,None,None,None)
    except:
        print "failed even earlier on",full_file_path
        """will log this to a file"""
        return(False,width,height,filesize,orig_resolution,slide_title,md5)
    # NOTE(review): unreachable — every path above returns.
    return(False,width,height,filesize,orig_resolution,slide_title,md5)
def check_image_status_in_db(full_file_path,filetype,db_cursor):
    """ this will do a lookup in the thumb database and see if the image is already there...
    if it is... I don't bother do any additional file lookups
    some of the metadata extraction can take a bit of time as I need to parse the PNG headers
    filetype can be svs, bigtiff image, ndpi, pyramid image

    NOTE(review): SQL is built by string interpolation of file paths —
    paths containing quotes will break (or inject into) the statement.
    md5 is None here, so the literal string 'None' lands in the md5sum
    column.
    """
    v = _verbose >= 1; vv = _verbose >= 2
    if filetype == 'svs':
        sql_lookup = "select count(*) as count from `svs_slide_info` where full_file_path='%s'" % (full_file_path)
        db_cursor.execute(sql_lookup)
        data = db_cursor.fetchone()
        # Only probe the file and insert when it is not registered yet
        if data['count'] == 0:
            if vv: print "Need to update entry"
            (valid_image,width,height,filesize,orig_resolution,base_file_name,md5) = openslide_test_file(full_file_path,'svs',db_cursor)
            if valid_image:
                # Parent directory doubles as the archive/folder name
                slide_folder = str(full_file_path.split('/')[-2])
                sql = "insert into `svs_slide_info` ( slide_filename, image_width,image_height, resolution, full_file_path, slide_folder, filesize ,md5sum ) "
                sql += " Values ('%s',%s,%s,%s,'%s', '%s',%d,'%s' ) " % ( base_file_name, width, height, orig_resolution, full_file_path, slide_folder, filesize ,md5)
                db_cursor.execute(sql)
    elif filetype == 'pyramid':
        sql_lookup = "select count(*) as count from `dzi_pyramid_info` where full_file_path like ('"+full_file_path+"')"
        db_cursor.execute(sql_lookup)
        data = db_cursor.fetchone()
        if data['count'] == 0:
            if vv: print "Need to update entry"
            (valid_image,width,height,filesize,orig_resolution,pyramid_file_name,md5) = openslide_test_file(full_file_path,'pyramid',db_cursor)
            if valid_image:
                slide_folder = str(full_file_path.split('/')[-2])
                insert_sql = "insert into `dzi_pyramid_info` ( pyramid_filename, image_width, image_height, full_file_path, file_basename, filesize ,pyramid_folder) "\
                             + " Values ('%s',%d,%d,'%s','%s', %d, '%s' ) " % ( pyramid_file_name, width, height, full_file_path , slide_folder, filesize , slide_folder)
                print insert_sql
                db_cursor.execute(insert_sql)
def set_active_archive_status(metadata_dict_cursor):
    """This will update and/or set the flag for a slide being an active archive from the TCGA data set

    Clears active_tcga_slide on every row, then re-flags slides whose
    folder matches an archive listed in `latest_archive_info` containing
    'slide', 'diagnostic' or 'tissue' in its name.
    """
    select_stmt = " select * from `latest_archive_info`"
    print select_stmt
    metadata_dict_cursor.execute(select_stmt)
    result = metadata_dict_cursor.fetchall()
    active_slide_archive = []
    for row in result:
        archive_name = row['ARCHIVE_NAME']
        # Only image archives are relevant here
        if 'slide' in archive_name or 'diagnostic' in archive_name or 'tissue' in archive_name:
        #    print archive_name
            active_slide_archive.append(archive_name)
    print "I have found",len(active_slide_archive),"active slid archives"
    ## i should probably set all rchives to null first..
    ####first set the entire thing to not have
    update_stmt = "update svs_slide_info set active_tcga_slide='0'"
    print update_stmt
    metadata_dict_cursor.execute(update_stmt)
    # Re-enable the flag for each currently active archive folder
    for cur_archive in active_slide_archive:
        update_stmt = "update svs_slide_info set active_tcga_slide='1' where slide_folder='%s'" % cur_archive
        print update_stmt
        metadata_dict_cursor.execute(update_stmt)
    # The block below is dead code preserved as a string literal
    """Now need to check if file is on the filesystem
    result = metadata_dict_cursor.fetchall()
    null_rows = 0
    for row in result:
        full_file_path = row['full_file_path']
        patient_id = get_tcga_id( os.path.basename(full_file_path) ,False)
    """
def validate_slide_pyramid_linkage(db_cursor,db_cursor_two):
    """Delete svs_slide_info rows whose slide/pyramid linkage is stale.

    A row is invalid when its pyramid file or SVS file is missing on
    disk, or when the two basenames (before the first '.') disagree.
    db_cursor reads; db_cursor_two issues the deletes.
    """
    select_stmt = " select * from `svs_slide_info`"
    db_cursor.execute(select_stmt)
    """Now need to check if file is on the filesystem"""
    result = db_cursor.fetchall()
    invalid_pyramid_link = 0
    print len(result),"rows to process"
    for row in result:
        #print row
        invalid_row = False
        pyramid = (row['pyramid_filename'])
        if not os.path.isfile(pyramid):
            print "Pyramid is missing...",pyramid
            invalid_row = True
        svs = (row['full_file_path'])
        if not os.path.isfile(svs):
            print "SVS is missing",svs
            invalid_row = True
        # Compare basenames up to the first dot (e.g. foo.svs vs foo.svs.dzi.tif)
        if os.path.basename(pyramid).split('.')[0] != os.path.basename(svs).split('.')[0]:
            print svs,pyramid,"DONT SEEM TO MATCH"
            print os.path.basename(pyramid),os.path.basename(svs)
            invalid_row = True
        if invalid_row:
            del_sql = "delete from svs_slide_info where slide_id='%d'" % row['slide_id']
            db_cursor_two.execute(del_sql)
    ##pyramid_file_name and full_file_path
def generate_slide_pyramid_linkage(db_cursor,db_cursor_two):
    """ This will update the slide database and link the pyramids associated with the image.... will scan multiple
    tables

    Four backfill passes over svs_slide_info / dzi_pyramid_info:
      1. set pyramid_filename/pyramid_generated where a matching pyramid
         file exists on disk;
      2. fill patient_id from the TCGA barcode in the filename;
      3. fill stain_type/tissue_type from the archive folder name;
      4. set dzi_pyramid_info.parent_slide_id by matching folder +
         filename back to the slide table.
    """
    v = _verbose >= 1; vv = _verbose >= 2
    v= True
    vv = True
    """pyramid filenames match on slide_filename in the svs_slide_info table and slide_folder... there are the two
    main keys"""
    """ other fields of import include stain_type and main_project_name... this needs to be duplictable at some point
    since a slide can be in more than one project.... other key field is tissue_type and patient_id
    I may want to have this field iterate multiple fields one by one....
    """
    ## in the dzi_pyramid_info I have two fields that need to be dupdated...parent_slide_title and parent_slide_id
    ## probably only need one of these... other field thats relevant is pyramid_folder
    # Pass 1: link pyramid files for rows not yet flagged as generated
    select_stmt = " select * from `svs_slide_info` where pyramid_generated is NULL"
    db_cursor.execute(select_stmt)
    """Now need to check if file is on the filesystem"""
    result = db_cursor.fetchall()
    null_rows = 0
    matched_pyramids_found = 0
    for row in result:
        null_rows += 1
        # Expected pyramid path mirrors the raw-slide tree under PYRAMIDS
        matched_pyramid_file = row['full_file_path'].replace('/bigdata/RAW_SLIDE_LINKS/CDSA/','/bigdata2/PYRAMIDS/CDSA/')+'.dzi.tif'
    #    print matched_pyramid_file
        if(os.path.isfile(matched_pyramid_file)):
            update_sql = "update svs_slide_info set pyramid_filename='%s',pyramid_generated='%d' where slide_id='%d'" % (matched_pyramid_file,True,row['slide_id'])
            db_cursor.execute(update_sql)
            matched_pyramids_found += 1
        else:
            pass
            #//there should be a matching pyramid
            #patient_id = get_tcga_id( os.path.basename(full_file_path) ,False)
    #        print patient_id
    #    if not patient_id[0] == None:
    #    else:
    #        print "Found no patient id...",full_file_path
    print "there were",null_rows,"empty rows and",matched_pyramids_found,"matched pyramids"
    # Pass 2: backfill patient_id from the TCGA subject barcode
    select_stmt = " select * from `svs_slide_info` where patient_id is NULL"
    db_cursor.execute(select_stmt)
    """Now need to check if file is on the filesystem"""
    result = db_cursor.fetchall()
    null_rows = 0
    for row in result:
        full_file_path = row['full_file_path']
        patient_id = get_tcga_id( os.path.basename(full_file_path) ,False)
    #    print patient_id
        null_rows += 1
        if not patient_id[0] == None:
            update_sql = "update svs_slide_info set patient_id='%s' where slide_id='%d'" % (patient_id[0],row['slide_id'])
            db_cursor.execute(update_sql)
        else:
            print "Found no patient id...",full_file_path
    print "there were",null_rows,"empty rows"
    # Pass 3: backfill stain/tissue type from the archive folder name
    select_stmt = " select * from `svs_slide_info` where stain_type is NULL and tissue_type is NULL"
    db_cursor.execute(select_stmt)
    """Now need to check if file is on the filesystem"""
    result = db_cursor.fetchall()
    null_rows = 0
    for row in result:
        full_file_path = row['full_file_path']
        (stain_type,tissue_type) = get_tcga_stain_type(full_file_path )
        """I originally AND 'ed the sql statement and it caused it to crash.... i guess that's the logical operator"""
        null_rows += 1
        if not stain_type == None and not tissue_type == None:
            update_sql = "update svs_slide_info set stain_type='%s', tissue_type='%s' where slide_id=%d" %\
                (stain_type,tissue_type,row['slide_id'])
            db_cursor.execute(update_sql)
        else:
            print "Found no matching group type ...",full_file_path
    print "there were",null_rows,"empty rows"
    # Pass 4: link pyramids back to their parent slide rows
    select_stmt = " select * from `dzi_pyramid_info` where parent_slide_id is NULL"
    db_cursor.execute(select_stmt)
    """Now need to check if file is on the filesystem"""
    result = db_cursor.fetchall()
    null_rows = 0
    for row in result:
        full_file_path = row['full_file_path']
        pyramid_folder = row['pyramid_folder']
        pyramid_filename = row['pyramid_filename'] ### of note it is quite likely the pyramid filename does NOT match the
        ## origin slide filename but has extra crap at the end...
        ## and also this can be a one to many relationship.. i.e. i may have pyramidized a file
        ## multiple times
        pyramid_id = row['pyramid_id']
        slide_filename = pyramid_filename.replace('.dzi.tif','')
        ###  = row['pyramid_filename'] ### of note it is quite likely the pyramid filename does NOT match the the dzi.tif is the issue
        pyramid_to_orig_slide_match = "select * from svs_slide_info where slide_folder='%s' and slide_filename like '%s'" %(pyramid_folder,slide_filename)
        db_cursor_two.execute(pyramid_to_orig_slide_match)
        slide_match_result = db_cursor_two.fetchall()
        if slide_match_result:
            for slide_row in slide_match_result:
                print slide_row
                slide_id = slide_row['slide_id']
                """so now that I found a match I need to reverse the lookup and get the pyramid id.."""
                #    set_slide_match_sql = "update svs_slide_info select * from svs_slide_info where slide_folder='%s' and slide_filename like '%s'" %(pyramid_folder,slide_filename)
                set_pyramid_match_sql = "update dzi_pyramid_info set parent_slide_id='%d' where pyramid_id='%d'" %(slide_id,pyramid_id)
                db_cursor_two.execute( set_pyramid_match_sql)
        else:
            #    print "No match for",slide_filename,"so found a null file set",pyramid_folder
            pass
    """ null_rows += 1
        if not stain_type == None and not tissue_type == None:
            update_sql = "update svs_slide_info set stain_type='%s', tissue_type='%s' where slide_id=%d" %\
                (stain_type,tissue_type,row['slide_id'])
            metadata_cursor.execute(update_sql)
        else:
            print "Found no matching group type ...",full_file_path
    print "there were",null_rows,"empty rows"
    """
def get_file_metadata ( input_file, file_type):
    """Stub: extract metadata for a file based on its root type
    (ndpi, svs, bigtiff, ...). Currently only echoes its arguments."""
    print input_file, file_type
def find_clin_reports ( tumor_type ):
    """Locate clinical biotab .txt reports for a TCGA tumor type.

    Walks each CLIN_REPORT_ROOT_DIRS tree under the local TCGA mirror and
    returns a dict keyed by report basename (extension removed), each
    value holding the local path and the mirror-relative web path.
    """
    clin_data_struct = {}
    # the clinical data reports are the cleanest with nationwidechildrens
    for clin_rpt_dir in CLIN_REPORT_ROOT_DIRS:
        path_base_dir = TCGA_LOCAL_ROOT_DIR+tumor_type+clin_rpt_dir
        #print path_base_dir
        for dpath, dnames, fnames in os.walk( path_base_dir, followlinks=True):
            for file in fnames:
                if '.txt' in file:
                    # BUG FIX: rstrip('.txt') stripped any trailing '.',
                    # 't', 'x' characters (mangling names like "report.txt"
                    # -> "repor"); slice off the literal suffix instead.
                    if file.endswith('.txt'):
                        filebase = file[:-len('.txt')]
                    else:
                        filebase = file
                    full_file_path = dpath+'/'+filebase
                    #Making the full file path a relative web path
                    web_path = full_file_path.replace(TCGA_LOCAL_ROOT_DIR,'')
                    clin_data_struct[filebase] = { 'web_path':web_path, 'full_file_path':full_file_path }
    return clin_data_struct
def find_path_reports ( tumor_type ):
    """Locate pathology-report PDFs for a TCGA tumor type.

    Path reports live under more than one base directory depending on
    whether intgen or nationwidechildrens curated them; all are walked.
    Returns a dict keyed by report basename (extension removed), each
    value holding the local path and the mirror-relative web path.
    """
    path_data_struct = {}
    for PATH_REPORT_ROOT in PATH_REPORT_ROOT_DIRS:
        path_base_dir = TCGA_LOCAL_ROOT_DIR+tumor_type+PATH_REPORT_ROOT
        #print path_base_dir
        for dpath, dnames, fnames in os.walk( TCGA_LOCAL_ROOT_DIR+tumor_type+PATH_REPORT_ROOT, followlinks=True):
            for file in fnames:
                if '.pdf' in file:
                    # BUG FIX: rstrip('.pdf') stripped any trailing '.',
                    # 'p', 'd', 'f' characters; slice off the literal
                    # suffix instead.
                    if file.endswith('.pdf'):
                        filebase = file[:-len('.pdf')]
                    else:
                        filebase = file
                    full_file_path = dpath+'/'+filebase
                    #Making the full file path a relative web path
                    web_path = full_file_path.replace(TCGA_LOCAL_ROOT_DIR,'')
                    path_data_struct[filebase] = { 'web_path':web_path, 'full_file_path':full_file_path }
    return path_data_struct
def find_tcga_clinical_files ( tumor_type ):
    """Index .pdf reports under the path-report tree by basename.

    NOTE(review): despite the name, this walks PATH_REPORT_ROOT, which is
    never defined at module scope (only PATH_REPORT_ROOT_DIRS is), so
    calling this raises NameError — confirm the intended root.
    """
    pdf_path_reports = []
    path_data_struct = {}
    path_base_dir = TCGA_LOCAL_ROOT_DIR+tumor_type+PATH_REPORT_ROOT
    #print path_base_dir
    for dpath, dnames, fnames in os.walk( TCGA_LOCAL_ROOT_DIR+tumor_type+PATH_REPORT_ROOT, followlinks=True):
        for file in fnames:
            if '.pdf' in file:
                # NOTE(review): rstrip strips a character set ('.','p','d','f'),
                # not the literal suffix — basenames ending in those letters
                # get mangled.
                filebase = file.rstrip('.pdf')
                #full_file_path = dpath+'/'+filebase
                # NOTE(review): hard-coded 'temp' (debug leftover?) means every
                # entry shares the same placeholder path — confirm intent.
                full_file_path = 'temp'
                web_path = full_file_path.replace(TCGA_LOCAL_ROOT_DIR,'')
                path_data_struct[filebase] = { 'web_path':web_path, 'full_file_path':full_file_path }
                #Making the full file path a relative web path
                #pdf_path_reports.append(path_data_struct)
    return path_data_struct
def find_ndpi_image_list( ndpi_root_path ):
    """Recursively collect every .ndpi file under ndpi_root_path.

    Follows symlinks; prints a count and returns the list of full paths.
    """
    found_ndpi_files = []
    ndpi_root_path = ndpi_root_path.rstrip('/')
    for dpath, dnames, fnames in os.walk( ndpi_root_path, followlinks=True):
        for file in fnames:
            if '.ndpi' in file:
                #filebase = file.rstrip('.ndpi')
                #print dpath
                found_ndpi_files.append(dpath +'/'+file)
    print len(found_ndpi_files),"NDPI files were located"
    return found_ndpi_files
def find_svs_image_list( project_name, svs_root_path_list ):
    """Collect every .svs file for project_name under each root path.

    Follows symlinks; emits a one-line progress counter via LinePrinter
    and returns the list of full file paths.
    """
    found_svs_files = []
    svs_files_found = 0
    for svs_root_path in svs_root_path_list:
        print svs_root_path
        for dpath, dnames, fnames in os.walk( svs_root_path+project_name, followlinks=True):
            for file in fnames:
                if '.svs' in file:
                    # NOTE(review): rstrip strips a char set, not the suffix;
                    # filebase is unused, so harmless here.
                    filebase = file.rstrip('.svs')
                    full_filename = dpath+'/'+file
                    #check_image_status_in_db(full_filename,'svs')  # change this to add corrupt files and bytes file found
                    # found_svs_files.append(filebase)
                    found_svs_files.append(full_filename)
                    svs_files_found += 1
            output = "Processed: %d svsfiles " % \
                (svs_files_found )
            #corrupt_svs_count, total_gigapixels, total_bytes, old_batch_svs)
            LinePrinter(output)
    return(found_svs_files)
def find_pyramid_images( project_name, pyramid_root_dirs):
## first find the available resolutions...
pyramid_images = []
pyramids_found = 0
### I am going to add or scan for a 20X, 5X or 40X instead... and use that
for pyramid_root in pyramid_root_dirs:
if os.path.isdir(pyramid_root+project_name):
for dpath, dnames, fnames in os.walk( pyramid_root+project_name, followlinks=True):
for file in fnames:
if '.dzi.tif' in file.lower():
full_filename = dpath+'/'+file
pyramids_found += 1
if verbose: print file,dpath
#check_image_status_in_db(full_filename,'pyramid') # change this to add corrupt files and bytes file found
output = "Processed: %d pyramids" % pyramids_found
LinePrinter(output)
pyramid_images.append(full_filename)
return(pyramid_images)
def get_tcga_stain_type( string_to_check):
    """Pull (tumor_type, image_class) out of a TCGA archive path.

    Matches the org_<tumor>.<diagnostic|tissue>_images pattern anywhere
    in the string; returns (None, None) when there is no match.
    """
    match = parse_tcga_tissue_and_stain_type.search(string_to_check)
    if not match:
        return (None, None)
    return (match.group(1), match.group(2))
class Table:
    """Sequence-style view of a database table.

    Supports table[i] (the row at offset i) and len(table) through a
    dedicated cursor on the supplied connection.
    NOTE(review): queries are assembled by %-interpolation, so name and
    index must come from trusted sources.
    """
    def __init__(self, db, name):
        self.db = db
        self.name = name
        self.dbc = self.db.cursor()
    def __getitem__(self, item):
        query = "select * from %s limit %s, 1" % (self.name, item)
        self.dbc.execute(query)
        return self.dbc.fetchone()
    def __len__(self):
        self.dbc.execute("select count(*) as count from %s" % (self.name))
        count_info = self.dbc.fetchone()
        return int( count_info['count'] )
"""
Acronyyms and abbreivations used as well as syntax info
wsi = whole slide image
-8 specifies bigtiff output and the -c sets the compression
pick the level to get which should be 0-- i.e. what layer am i trying to convert
"""
def check_for_valid_ADRC_ID( string_to_check):
"""a file should start with ideally ADRC##-#### or OS or osmething similar
Valid filename should be ADRCXX-XXXX_<Section>_<STAIN>_<NOtes> """
m = adrc_pat_one.match(string_to_check)
m_second_pat = adrc_pat_two.match(string_to_check)
m_third_pat = adrc_pat_three.match(string_to_check)
if m:
patient_id = m.group(1)
section_id = m.group(2)
stain = m.group(3)
# print patient_id,section_id,stain
return(True)
elif m_second_pat:
patient_id = m_second_pat.group(1)
section_id = m_second_pat.group(2)
stain = m_second_pat.group(3)
# print patient_id,section_id,stain
return(True)
elif m_third_pat:
patient_id = m_third_pat.group(1)
section_id = m_third_pat.group(2)
stain = m_third_pat.group(3)
else:
print "no match",string_to_check
return(False)
def parse_slide_info_for_ADRC_ID( string_to_check):
    """Parse (patient_id, section_id, stain) from a pyramid file name.

    Tries the ADRC##-####, OS##-#### and E##-#### dzi-filename patterns
    in turn, normalizing known stain tags.
    Returns (True, patient_id, section_id, stain) on a match, otherwise
    (False, None, None, None).
    """
    # NOTE(review): "TAU" appears twice below; the later "tau" entry wins,
    # so "TAU" normalizes to lowercase "tau" — confirm that is intended.
    stain_tag_normalization_dict = { "AB" : "Abeta", "ABETA" : "ABeta", "US_tau": "Tau", "US_pTDP" : "pTDP",
                                     "TAU" : "Tau" , "TAU" : "tau", "US_AB" : "ABeta", "US_aSYN-4B12" : "aSyn-4B12",
                                     "BIEL" : "Biel"}
    m = adrc_dzi_pat_one.match(string_to_check)
    m_second_pat = adrc_dzi_pat_two.match(string_to_check)
    m_third_pat = adrc_dzi_pat_three.match(string_to_check)
    if m:
        patient_id = m.group(1)
        section_id = m.group(2)
        stain = m.group(3)
        if stain in stain_tag_normalization_dict.keys(): stain = stain_tag_normalization_dict[stain]
        print patient_id,section_id,stain
        return(True,patient_id,section_id,stain)
    elif m_second_pat:
        patient_id = m_second_pat.group(1)
        section_id = m_second_pat.group(2)
        stain = m_second_pat.group(3)
        if stain in stain_tag_normalization_dict.keys(): stain = stain_tag_normalization_dict[stain]
        print patient_id,section_id,stain
        return(True,patient_id,section_id,stain)
    elif m_third_pat:
        patient_id = m_third_pat.group(1)
        section_id = m_third_pat.group(2)
        stain = m_third_pat.group(3)
        if stain in stain_tag_normalization_dict.keys(): stain = stain_tag_normalization_dict[stain]
        print patient_id,section_id,stain
        return(True,patient_id,section_id,stain)
    else:
        print "no match",string_to_check
        return(False,None,None,None)
def get_tcga_id( string_to_check , get_full_tcga_id):
    """Extract the TCGA subject ID from a file name.

    When get_full_tcga_id is true, returns (subject_id, full_sample_id)
    — e.g. ('TCGA-12-3456', 'TCGA-12-3456-01A-...') — or (None, None)
    if the full barcode does not match. Otherwise returns
    (subject_id, 'None'), or (None, None) when nothing matches.
    """
    if get_full_tcga_id:
        full_match = parse_full_TCGA_ID.match(string_to_check)
        if not full_match:
            return None,None
        # Groups 2 and 3 (sample number + vial letter) are joined directly
        full_id = "%s-%s%s-%s" % (full_match.group(1), full_match.group(2),
                                  full_match.group(3), full_match.group(4))
        return (full_match.group(1), full_id)
    subject_match = parse_TCGA_SUBJECT_ID.match(string_to_check)
    if subject_match:
        return (subject_match.group(0),'None')
    return (None,None)
def set_database_slide_metadata(database,table):
    """Backfill pyramid metadata (stain type, annotation flag) in the DB.

    NOTE(review): metadata_dict_cursor / update_cursor are used as
    globals never defined at module scope in this chunk — presumably set
    up by the caller via connect_to_db; confirm. The `table` parameter
    is unused.
    """
    ## update stain_Type first
    sql_lookup = "select * from `"+ database + "`.`dzi_pyramid_info` where stain_type is NULL "
    metadata_dict_cursor.execute(sql_lookup)
    data = metadata_dict_cursor.fetchall()
    for row in data:
        # print row
        (found_tags, patient_id, section_id, stain) = parse_slide_info_for_ADRC_ID( row['pyramid_filename'])
        if found_tags:
            update_sql = "update `" + database + "`.`"+"dzi_pyramid_info` set stain_type='%s' where pyramid_id='%d'" % ( stain, row['pyramid_id'])
            print update_sql
            update_cursor.execute(update_sql)
    # Second pass only lists rows with no annotation flag yet (no update)
    update_annotation_sql = "select * from `" + database + "`.`dzi_pyramid_info` where has_annotation is Null"
    metadata_dict_cursor.execute(update_annotation_sql)
    data = metadata_dict_cursor.fetchall()
    for row in data:
        print row
def update_annotations(database):
    """Scan the XML annotation dump and flag annotated pyramids.

    Walks /var/www/adrc_js/xml_annotation_files/ for .xml files and sets
    has_annotation='1' on every pyramid whose filename starts with the
    XML basename. NOTE(review): relies on module-global cursors
    (metadata_dict_cursor / update_cursor) created elsewhere; `database`
    is unused.
    """
    base_path = '/var/www/adrc_js/xml_annotation_files/'
    # crawl looking for svs files
    for dirpath, dirnames, filenames in os.walk(base_path, followlinks=True, onerror=_listdir_error):
        for fname in filenames:
            # NDPI (slide) file?
            if 'xml' in fname:
                file_with_path = os.path.join(dirpath, fname)
                print file_with_path,dirpath,dirnames,filenames
                base_filename = os.path.basename(fname)
                base_filename = base_filename.replace('.xml','')
                print base_filename
                # Prefix match: pyramid names carry extra suffixes
                find_slide_sql = "select * from dzi_pyramid_info where pyramid_filename like '%s%%'" % (base_filename)
                print find_slide_sql
                metadata_dict_cursor.execute( find_slide_sql)
                data = metadata_dict_cursor.fetchall()
                for row in data:
                    print data
                    update_sql = "update dzi_pyramid_info set has_annotation='1' where pyramid_id='%d'" % (row['pyramid_id'])
                    print update_sql
                    update_cursor.execute(update_sql)
def gen_ndpi_pyramid(input_file,pyramid_file):
    """ this is a new method that will convert an NDPI to a tiff without necessitating tiling

    Writes a bash script that (1) copies the NDPI to SSD cache, (2) runs
    ndpi2tiff to a bigtiff, (3) runs vips to build the .dzi.tif pyramid,
    then submits the script to SGE with qsub. No-op when the pyramid
    already exists.
    """
    v = _verbose >= 1; vv = _verbose >= 2
    ndpi2tiff_command = "/bigdata3/BIG_TIFF_IMAGES/ndpi2tiff -8 -t -c lzw:2 "
    script_file_base_path = '/fastdata/tmp/SGE_SCRIPTS/'
    SSD_TEMP_SPACE = '/fastdata/tmp/'
    global script_id_num ### going to increment from some number...maybe ill make this random later
    current_command_list = '#/bin/bash \n' ### set this to null... ill only open a script file if i actually run a command
    delete_bigtiff_image = True ## determines if I should cleanup/delete the bigtiff i generate
    ## this is an intermediate file before pyramid generation
    print input_file,pyramid_file
    if not os.path.isfile(pyramid_file):
        ### for speed I am going to copy the input file to /fastdata/tmp..
        ### I am copying the input_file from its home to a cache dir of SSD goodness
        ssd_cached_file = SSD_TEMP_SPACE + os.path.basename(input_file)
        if v: print ssd_cached_file,"cached file name"
        if not os.path.isfile(ssd_cached_file):
            current_command_list += "sleep "+str(random.randint(1,180) ) + ' \n'
            current_command_list += "cp "+input_file+' '+SSD_TEMP_SPACE+'\n'
            ## after deliberation copying from the script versus via ssh helps throttle disk copy from
            ## the long term image store which is slower..
            ## I decided to add a random sleep time of 0 - 180 seconds in each job
        ndpi2tiff_command = ndpi2tiff_command + ssd_cached_file + default_level
        if v: print ndpi2tiff_command
        # ndpi2tiff names its output <input>,<level>.tif
        output_file = ssd_cached_file+',0.tif'
        if not os.path.isfile(output_file):
            current_command_list += ndpi2tiff_command +'\n'
        pyramid_output_dir = os.path.dirname(pyramid_file)
        if not os.path.isdir(pyramid_output_dir):
            os.makedirs(pyramid_output_dir)
        #vips_pyramid_output = cur_file.replace(input_dir,pyramid_directory) +'.dzi.tif'
        vips_command = 'vips im_vips2tiff -v '+output_file+' '+pyramid_file+':jpeg:90,tile:256x256,pyramid,,,,8 '
        print vips_command
        current_command_list += vips_command
        if v: print current_command_list
        ### now writing the script
        # NOTE(review): the local f_out shadows the module-level f_out log
        current_bash_script = script_file_base_path+'ndpi2tiff-'+str(script_id_num)+'.sh'
        f_out = open(current_bash_script,'w')
        f_out.write(current_command_list)
        if delete_bigtiff_image:
            f_out.write('\n rm -rf \''+output_file+'\' \n')
            f_out.write('rm -rf '+ssd_cached_file+' \n')
            ## this may be better to just not put part of the command script
        script_id_num += 1
        f_out.close()
        # NOTE(review): shell=True with a concatenated path — paths must be
        # trusted and contain no shell metacharacters.
        sge_submit_cmd = "qsub -q slide_convert.q "+current_bash_script
        print sge_submit_cmd
        output = subprocess.check_output (sge_submit_cmd,stderr=subprocess.STDOUT, shell=True)
        print output
def _listdir_error(error):
    """os.walk onerror hook: report unreadable directories to stderr."""
    print >>sys.stderr, "Could not traverse/list:", error.filename
def check_files(wsi_dir=DEFAULT_WSI_DIR):
    """Recursively crawl *wsi_dir* for .ndpi slides and process each one.

    Every NDPI file found is checked/registered in the slide database; when no
    pyramid file exists yet, pyramid generation is kicked off via
    gen_ndpi_pyramid().  Increments the module-global ``ndpi_count``.

    Arguments:
    wsi_dir -- The base directory to (recursively) search for .ndpi images.
    Returns: the (global) running count of NDPI images found.
    """
    print "Parsing",wsi_dir
    # sanity checks
    if not os.path.isdir(wsi_dir):
        raise IOError('SVS or NDPI base path is not a directory or is unreadable: ' + str(wsi_dir))
    # get rid of any trailing slashes
    wsi_dir = wsi_dir.rstrip('/')
    global ndpi_count
    # arg handling
    v = _verbose >= 1; vv = _verbose >= 2
    wsi_prefix_len = len(wsi_dir) + 1 # plus 1 for leading '/'
    ndpi_pat = re.compile(r'.*\.ndpi$', re.IGNORECASE)
    # crawl looking for ndpi files (followlinks so linked archive dirs are included)
    for dirpath, dirnames, filenames in os.walk(wsi_dir, followlinks=True, onerror=_listdir_error):
        for fname in filenames:
            # NDPI (slide) file?
            if ndpi_pat.match(fname):
                ndpi_count +=1
                file_with_path = os.path.join(dirpath, fname)
                if v: print >>sys.stderr, "Slide: ", file_with_path
                path_suffix = dirpath[wsi_prefix_len:]
                # NOTE(review): fname comes from os.walk and never contains '/',
                # so this split is a no-op -- `file` always equals fname.
                path = fname.split('/')
                file = path[len(path)-1]
                ### first check if the ndpi file is registered in our database...
                check_image_status_in_db(file_with_path,'ndpi','adrc_slide_database','ndpi_slide_info')
                # NOTE(review): the `or True` disables the ADRC ID check, so every
                # slide is processed regardless of ID validity -- confirm intended.
                if check_for_valid_ADRC_ID( file) or True :
                    input_file = os.path.join(dirpath)+'/'+file
                    pyramid_file = input_file.replace(DEFAULT_WSI_DIR,DEFAULT_PYRAMID_DIR)+'.dzi.tif'
                    if not os.path.isfile(pyramid_file):
                        print "Generate pyramid for",file
                        gen_ndpi_pyramid(input_file,pyramid_file)
                    else:
                        # pyramid already exists; just make sure it is registered
                        check_image_status_in_db(pyramid_file,'pyramid','adrc_slide_database','dzi_pyramid_info')
    return ( ndpi_count)
def create_ADRC_schemas():
    """Print (NOT execute) the CREATE TABLE statements for the ADRC pyramid tables.

    The SQL is only echoed to stdout so it can be reviewed / pasted into a
    MySQL shell; no database connection is used here.
    """
    create_adrc_pyramid_schema = """
CREATE TABLE `dzi_pyramid_info` (
`pyramid_filename` varchar(200) DEFAULT NULL,
`image_width` int(10) unsigned DEFAULT NULL,
`image_height` int(10) unsigned DEFAULT NULL,
`resolution` int(11) DEFAULT '40',
`full_file_path` varchar(255) DEFAULT NULL,
`file_basename` varchar(100) DEFAULT NULL,
`filesize` int(10) unsigned DEFAULT NULL,
`parent_slide_filename` varchar(50) DEFAULT NULL,
`parent_slide_id` int(10) unsigned DEFAULT NULL,
`pyramid_folder` varchar(80) DEFAULT NULL,
`main_project_name` varchar(20) DEFAULT NULL,
`stain_type` varchar(30) DEFAULT NULL,
`tissue_type` varchar(30) DEFAULT NULL,
`pyramid_id` int(11) NOT NULL AUTO_INCREMENT,
PRIMARY KEY (`pyramid_id`),
KEY `full_file_name` (`full_file_path`),
KEY `full_file_path` (`full_file_path`)
) ENGINE=MyISAM ;
CREATE TABLE `corrupt_or_unreadable_pyramid_files` (
`full_file_name` text,
`filesize` int(10) unsigned DEFAULT NULL,
`active_archive` tinyint(4) DEFAULT NULL,
`pyramid_id` int(11) NOT NULL AUTO_INCREMENT,
PRIMARY KEY (`pyramid_id`)
)
"""
    print create_adrc_pyramid_schema
"""def main(args=None):
if args is None: args = sys.argv[1:]
global _verbose; _verbose = opts.verbose
currentdir = DEFAULT_WSI_DIR
# for currentdir in DIRS_WITH_IMAGES:
#check_files(wsi_dir=opts.wsi_dir)
# (ndpi_count) = check_files(currentdir+'ADRC61-128/') ## is running on node16
(ndpi_count) = check_files(currentdir)
# create_ADRC_schemas()
#et_database_slide_metadata('adrc_slide_database','dzi_pyramid_info')
# update_annotations('adrc_slide_databse')
print "NDPI slides:", ndpi_count
"""
def update_md5_values(database,table_to_crawl,primary_key,db_cursor, update_cursor):
    """Fill in missing md5sum values for rows whose backing file still exists.

    Rows with md5sum NULL are fetched; for each, if ``full_file_path`` exists
    on disk the md5 is computed and written back.  Rows whose file is missing
    are only reported -- the DELETE statement is built and printed but its
    execution is commented out.

    NOTE(review): SQL is assembled with %-interpolation (identifiers cannot be
    bound as parameters, but the md5/key VALUES could be).  Safe only for
    trusted, internally-generated inputs -- consider DB-API parameter binding.
    """
    #sql_lookup = "select * from `%s`.`%s` where md5sum is NULL and pyramid_folder like '%%BRCA%%' " % (database,table_to_crawl)
    sql_lookup = "select * from `%s`.`%s` where md5sum is NULL " % (database,table_to_crawl)
    db_cursor.execute(sql_lookup)
    data = db_cursor.fetchall()
    print len(data),"rows to process"
    for row in data:
        if os.path.isfile(row['full_file_path']):
            print row
            # md5sum() hashes the file contents -- this loop is disk-I/O bound.
            update_stmt = "update `%s`.`%s` set md5sum='%s' where %s='%s'" % (database,table_to_crawl,md5sum(row['full_file_path']),primary_key,row[primary_key])
            print update_stmt
            update_cursor.execute(update_stmt)
        else:
            # file has vanished; report only (delete intentionally disabled)
            print "missing",row
            update_stmt = "delete from `%s`.`%s` where %s='%s'" % (database,table_to_crawl,primary_key,row[primary_key])
            print update_stmt
            #update_cursor.execute(update_stmt)
def locate_md5_collissions(database,table_to_crawl,db_cursor, update_cursor):
    """Report groups of rows in *table_to_crawl* that share the same md5sum.

    For each md5 owned by two or more rows, the rows are dumped and any row
    whose ``full_file_path`` does not match the first row's basename (nor its
    TCGA ID prefix) is flagged.  Purely a reporting pass: ``update_cursor`` is
    currently unused (the update call at the bottom is commented out).
    """
    sql_lookup = "select md5sum, count(*) as count from `%s`.`%s` group by md5sum having count>1" % (database,table_to_crawl)
    print sql_lookup
    db_cursor.execute(sql_lookup)
    data = db_cursor.fetchall()
    print len(data),"rows to process"
    md5_collision_list = []
    for row in data:
        #print row
        md5_collision_list.append(row['md5sum'])
    #print md5_collision_list
    print len(md5_collision_list),"entries with 2 or more matching md5 values"
    for md5 in md5_collision_list:
        # NULL md5 rows also group together; skip that pseudo-collision.
        if md5 is not None:
            dup_sql = "select * from `%s`.`%s` where md5sum='%s'" % (database,table_to_crawl,md5)
            #print dup_sql
            db_cursor.execute(dup_sql)
            data = db_cursor.fetchall()
            #print data[0]
            print "------------NEXT ENTRY has %d---------------" % len(data)
            #print data
            # use the first row of the group as the reference filename
            filename = os.path.basename(data[0]['full_file_path'])
            #print svs_filename
            for row in data:
                print row['pyramid_filename']
                if filename not in row['full_file_path']:
                    # fall back to the TCGA ID prefix before flagging a mismatch
                    base_tcga_id = filename.split('.')[0]
                    if base_tcga_id not in row['full_file_path']:
                        print "shit",filename,row['full_file_path'],base_tcga_id
                        print row
    # print data[0]
    #print update_stmt
    #update_cursor.execute(update_stmt)
#pyramid_filename': '/bigdata2/PYRAMIDS/CDSA/BRCA_Diagnostic/nationwidechildrens.org_BRCA.diagnostic_images.Level_1.93.0.0/TCGA-E2-A14Y-01Z-00-DX1.804A22A3-FD8D-4C8A-A766-48D28434DE22.svs.dzi.tif', 'active_tcga_slide': 0, 'resolution': 40L, 'md5sum': None, 'image_width': 113288L, 'pyramid_generated': 1, 'patient_id': 'TCGA-E2-A14Y', 'stain_type': 'BRCA', 'image_height': 84037L, 'filesize': 1971660649L, 'slide_folder': 'nationwidechildrens.org_BRCA.diagnostic_images.Level_1.93.0.0', 'slide_filename': 'TCGA-E2-A14Y-01Z-00-DX1.804A22A3-FD8D-4C8A-A766-48D28434DE22.svs', 'main_project_name': None, 'slide_id': 29602L,
# 'full_file_path': '/bigdata/RAW_SLIDE_LINKS/CDSA-LOCAL/BRCA_Diagnostic/nationwidechildrens.org_BRCA.diagnostic_images.Level_1.93.0.0/TCGA-E2-A14Y-01Z-00-DX1.804A22A3-FD8D-4C8A-A766-48D28434DE22.svs',
# 'tissue_type': 'diagnostic'}
### find collisions across pyramid_filenames as well..
def find_rogue_pyramid_filenames(database,db_cursor,con_two):
    """Sanity-check that each row's pyramid_filename derives from its slide.

    Fetches every row of `svs_slide_info` and flags rows where the slide's
    basename (``full_file_path``) does not appear in the pyramid's basename.
    Rows with an empty pyramid_filename are skipped.  ``con_two`` is accepted
    for call-site compatibility but unused here.
    """
    rogue_sql = "select * from `%s`.`svs_slide_info`" % (database)
    print(rogue_sql)
    db_cursor.execute(rogue_sql)
    data = db_cursor.fetchall()
    for row in data:
        pyr = os.path.basename( row['pyramid_filename'])
        svs = os.path.basename( row['full_file_path'] )
        # BUG FIX: was `pyr is not ''` -- an identity test against a string
        # literal that only "worked" via CPython interning; use real equality.
        if svs not in pyr and pyr != '':
            print("SHIT, pyr=%s,svs=%s" % ( pyr,svs))
            print(row)
def find_unlinked_files( db_cursor):
    """Report archive directories lacking a link under RAW_SLIDE_LINKS.

    Queries `latest_archive_info`, keeps archives whose name marks them as
    slide/diagnostic/tissue image archives, and prints any that are missing
    from /bigdata/RAW_SLIDE_LINKS/CDSA/*/.

    Returns: list of active slide archive names.
    """
    select_stmt = " select * from `latest_archive_info`"
    print(select_stmt)
    db_cursor.execute(select_stmt)
    result = db_cursor.fetchall()
    active_slide_archive = []
    for row in result:
        archive_name = row['ARCHIVE_NAME']
        # only slide-image archives are of interest here
        if 'slide' in archive_name or 'diagnostic' in archive_name or 'tissue' in archive_name:
            active_slide_archive.append(archive_name)
    # (message typo "slid" fixed)
    print("I have found %d active slide archives" % len(active_slide_archive))
    link_path = '/bigdata/RAW_SLIDE_LINKS/CDSA/*/'
    all_linked_dirs = glob.glob( link_path+'*')
    # basenames of currently linked dirs (renamed loop var: don't shadow builtin `dir`)
    currently_linked_dirs = [ os.path.basename(d) for d in all_linked_dirs]
    for active_dir in active_slide_archive:
        if active_dir not in currently_linked_dirs:
            print("need to link %s" % active_dir)
    return(active_slide_archive)
#(cur_one, cur_two) = dsa.connect_to_db('localhost','root','cancersuckz!','cdsa_js_prod')
#import dsa_common_functions as dsa
#(cur_one, cur_two) = dsa.connect_to_db('localhost','root','cancersuckz!','cdsa_js_prod')
#active_archive_list = dsa.find_unlinked_files(cur_one)
#active_archive_list
#history
"""Now need to check if file is on the filesystem
result = metadata_dict_cursor.fetchall()
null_rows = 0
for row in result:
full_file_path = row['full_file_path']
patient_id = get_tcga_id( os.path.basename(full_file_path) ,False)
"""
"""
"""
if __name__ == '__main__':
    print("Nothing to do...")
    # NOTE(review): the database connection below is commented out, so
    # `con_one` / `con_two` are never defined.  The two calls that used them
    # raised an immediate NameError; they are disabled here to match the other
    # commented-out calls.  Uncomment the connection line before re-enabling
    # any of them.
    #(con_one,con_two) = connect_to_db('localhost', 'root', 'cancersuckz!', 'cdsa_js_prod')
    #find_unlinked_files(con_one)
    #update_md5_values('cdsa_js_prod','svs_slide_info','slide_id',con_one,con_two)
    #locate_md5_collissions('cdsa_js_prod','svs_slide_info',con_one,con_two)
    #locate_md5_collissions('cdsa_js_prod','dzi_pyramid_info',con_one,con_two)
    #validate_slide_pyramid_linkage(con_one,con_two)
    #find_rogue_pyramid_filenames('cdsa_js_prod',con_one,con_two)
    #update_md5_values('cdsa_js_prod','dzi_pyramid_info','pyramid_id',con_one,con_two)
    #generate_slide_pyramid_linkage(con_one,con_two)
# ---- end of legacy slide-database script; generated Azure SDK module follows ----
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Optional per-call response hook passed as the ``cls`` kwarg: invoked as
# cls(pipeline_response, deserialized_result, response_headers).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateLinkServicesOperations:
"""PrivateLinkServicesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        """Wire up the pipeline client and (de)serializers for this operation group."""
        # AsyncPipelineClient used to build and send requests.
        self._client = client
        # msrest Serializer/Deserializer pair for URLs, bodies and responses.
        self._serialize = serializer
        self._deserialize = deserializer
        # Service client configuration (subscription_id, polling_interval, ...).
        self._config = config
    async def _delete_initial(
        self,
        resource_group_name: str,
        service_name: str,
        **kwargs: Any
    ) -> None:
        """Send the raw DELETE request (first leg of the delete LRO).

        Polling to completion is handled by :meth:`begin_delete`.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map auth/not-found/conflict statuses to azure-core exceptions; callers
        # may override/extend via an 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202: delete accepted (async); 204: resource already gone.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        service_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified private link service.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the private link service.
        :type service_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial DELETE when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                service_name=service_name,
                cls=lambda x,y,z: x,  # keep the raw PipelineResponse for the poller
                **kwargs
            )

        # Consumed by the initial call; must not be forwarded to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Delete has no response body; only the optional cls hook is invoked.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # This LRO reports its final state via the 'location' header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}'}  # type: ignore
async def get(
self,
resource_group_name: str,
service_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.PrivateLinkService":
"""Gets the specified private link service by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the private link service.
:type service_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkService, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_07_01.models.PrivateLinkService
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkService"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateLinkService', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}'} # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        service_name: str,
        parameters: "_models.PrivateLinkService",
        **kwargs: Any
    ) -> "_models.PrivateLinkService":
        """Send the raw PUT request (first leg of the create-or-update LRO).

        Polling to completion is handled by :meth:`begin_create_or_update`.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateLinkService"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the request body from the PrivateLinkService model.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'PrivateLinkService')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 = updated an existing service, 201 = created a new one.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize('PrivateLinkService', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('PrivateLinkService', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        service_name: str,
        parameters: "_models.PrivateLinkService",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.PrivateLinkService"]:
        """Creates or updates an private link service in the specified resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the private link service.
        :type service_name: str
        :param parameters: Parameters supplied to the create or update private link service operation.
        :type parameters: ~azure.mgmt.network.v2020_07_01.models.PrivateLinkService
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PrivateLinkService or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_07_01.models.PrivateLinkService]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateLinkService"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial PUT when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                service_name=service_name,
                parameters=parameters,
                cls=lambda x,y,z: x,  # keep the raw PipelineResponse for the poller
                **kwargs
            )

        # Consumed by the initial call; must not be forwarded to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final response body into a PrivateLinkService.
            deserialized = self._deserialize('PrivateLinkService', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # This LRO reports its final state via the Azure-AsyncOperation header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}'}  # type: ignore
    def list(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.PrivateLinkServiceListResult"]:
        """Gets all private link services in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PrivateLinkServiceListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_07_01.models.PrivateLinkServiceListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateLinkServiceListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # First page: operation URL + api-version; later pages: follow the
            # server-provided nextLink verbatim (it already embeds the query).
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Turn one page into (next_link, async-iterable of page items).
            deserialized = self._deserialize('PrivateLinkServiceListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.Error, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices'}  # type: ignore
    def list_by_subscription(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.PrivateLinkServiceListResult"]:
        """Gets all private link services in a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PrivateLinkServiceListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_07_01.models.PrivateLinkServiceListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateLinkServiceListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # First page: operation URL + api-version; later pages: follow the
            # server-provided nextLink verbatim (it already embeds the query).
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_subscription.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Turn one page into (next_link, async-iterable of page items).
            deserialized = self._deserialize('PrivateLinkServiceListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.Error, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/privateLinkServices'}  # type: ignore
    async def get_private_endpoint_connection(
        self,
        resource_group_name: str,
        service_name: str,
        pe_connection_name: str,
        expand: Optional[str] = None,
        **kwargs: Any
    ) -> "_models.PrivateEndpointConnection":
        """Get the specific private end point connection by specific private link service in the resource
        group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the private link service.
        :type service_name: str
        :param pe_connection_name: The name of the private end point connection.
        :type pe_connection_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateEndpointConnection, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_07_01.models.PrivateEndpointConnection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"

        # Construct URL
        url = self.get_private_endpoint_connection.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str'),
            'peConnectionName': self._serialize.url("pe_connection_name", pe_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if expand is not None:
            # $expand is only sent when explicitly requested by the caller.
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_private_endpoint_connection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}/privateEndpointConnections/{peConnectionName}'}  # type: ignore
    async def update_private_endpoint_connection(
        self,
        resource_group_name: str,
        service_name: str,
        pe_connection_name: str,
        parameters: "_models.PrivateEndpointConnection",
        **kwargs: Any
    ) -> "_models.PrivateEndpointConnection":
        """Approve or reject private end point connection for a private link service in a subscription.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the private link service.
        :type service_name: str
        :param pe_connection_name: The name of the private end point connection.
        :type pe_connection_name: str
        :param parameters: Parameters supplied to approve or reject the private end point connection.
        :type parameters: ~azure.mgmt.network.v2020_07_01.models.PrivateEndpointConnection
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateEndpointConnection, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_07_01.models.PrivateEndpointConnection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        # Map well-known failure status codes to specific azure-core exception
        # types; callers may extend/override via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.update_private_endpoint_connection.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str'),
            'peConnectionName': self._serialize.url("pe_connection_name", pe_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        # Serialize the connection model and issue the PUT through the pipeline.
        body_content = self._serialize.body(parameters, 'PrivateEndpointConnection')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Only 200 is a success for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
        if cls:
            # Hand raw response + deserialized model to the caller-supplied hook.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update_private_endpoint_connection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}/privateEndpointConnections/{peConnectionName}'}  # type: ignore
    async def _delete_private_endpoint_connection_initial(
        self,
        resource_group_name: str,
        service_name: str,
        pe_connection_name: str,
        **kwargs: Any
    ) -> None:
        """Issue the initial DELETE request for a private endpoint connection.
        Long-running-operation polling is driven by the public
        ``begin_delete_private_endpoint_connection`` wrapper.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_private_endpoint_connection_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str'),
            'peConnectionName': self._serialize.url("pe_connection_name", pe_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200/202 mean the delete was accepted (possibly still in progress);
        # 204 means there was nothing to delete.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_private_endpoint_connection_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}/privateEndpointConnections/{peConnectionName}'}  # type: ignore
    async def begin_delete_private_endpoint_connection(
        self,
        resource_group_name: str,
        service_name: str,
        pe_connection_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Delete private end point connection for a private link service in a subscription.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the private link service.
        :type service_name: str
        :param pe_connection_name: The name of the private end point connection.
        :type pe_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: fire the initial DELETE. The 'cls' passthrough
            # keeps the raw PipelineResponse so the poller can drive the LRO.
            raw_result = await self._delete_private_endpoint_connection_initial(
                resource_group_name=resource_group_name,
                service_name=service_name,
                pe_connection_name=pe_connection_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # DELETE yields no body; only invoke the caller's hook, if any.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str'),
            'peConnectionName': self._serialize.url("pe_connection_name", pe_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # The service signals LRO completion via the 'location' header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete_private_endpoint_connection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}/privateEndpointConnections/{peConnectionName}'}  # type: ignore
    def list_private_endpoint_connections(
        self,
        resource_group_name: str,
        service_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.PrivateEndpointConnectionListResult"]:
        """Gets all private end point connections for a specific private link service.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the private link service.
        :type service_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PrivateEndpointConnectionListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_07_01.models.PrivateEndpointConnectionListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnectionListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build a GET: the first page uses the templated URL; subsequent
            # pages use the server-provided next_link verbatim (no extra query).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_private_endpoint_connections.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'serviceName': self._serialize.url("service_name", service_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page -> (next page link or None, iterator of items).
            deserialized = self._deserialize('PrivateEndpointConnectionListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.Error, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_private_endpoint_connections.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}/privateEndpointConnections'}  # type: ignore
    async def _check_private_link_service_visibility_initial(
        self,
        location: str,
        parameters: "_models.CheckPrivateLinkServiceVisibilityRequest",
        **kwargs: Any
    ) -> Optional["_models.PrivateLinkServiceVisibility"]:
        """Issue the initial POST for the visibility check.
        Returns the deserialized result on 200, or None on 202 (operation still
        running); polling is driven by ``begin_check_private_link_service_visibility``.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.PrivateLinkServiceVisibility"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._check_private_link_service_visibility_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'CheckPrivateLinkServiceVisibilityRequest')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        # Only a 200 carries a body; a 202 leaves 'deserialized' as None.
        if response.status_code == 200:
            deserialized = self._deserialize('PrivateLinkServiceVisibility', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _check_private_link_service_visibility_initial.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/checkPrivateLinkServiceVisibility'}  # type: ignore
    async def begin_check_private_link_service_visibility(
        self,
        location: str,
        parameters: "_models.CheckPrivateLinkServiceVisibilityRequest",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.PrivateLinkServiceVisibility"]:
        """Checks whether the subscription is visible to private link service.
        :param location: The location of the domain name.
        :type location: str
        :param parameters: The request body of CheckPrivateLinkService API call.
        :type parameters: ~azure.mgmt.network.v2020_07_01.models.CheckPrivateLinkServiceVisibilityRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PrivateLinkServiceVisibility or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_07_01.models.PrivateLinkServiceVisibility]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateLinkServiceVisibility"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: fire the initial POST; 'cls' passthrough keeps
            # the raw PipelineResponse for the poller.
            raw_result = await self._check_private_link_service_visibility_initial(
                location=location,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response into the visibility model.
            deserialized = self._deserialize('PrivateLinkServiceVisibility', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # The service signals LRO completion via the 'location' header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_check_private_link_service_visibility.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/checkPrivateLinkServiceVisibility'}  # type: ignore
    async def _check_private_link_service_visibility_by_resource_group_initial(
        self,
        location: str,
        resource_group_name: str,
        parameters: "_models.CheckPrivateLinkServiceVisibilityRequest",
        **kwargs: Any
    ) -> Optional["_models.PrivateLinkServiceVisibility"]:
        """Issue the initial POST for the resource-group-scoped visibility check.
        Returns the deserialized result on 200, or None on 202 (still running);
        polling is driven by the public ``begin_...`` wrapper.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.PrivateLinkServiceVisibility"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._check_private_link_service_visibility_by_resource_group_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'CheckPrivateLinkServiceVisibilityRequest')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        # Only a 200 carries a body; a 202 leaves 'deserialized' as None.
        if response.status_code == 200:
            deserialized = self._deserialize('PrivateLinkServiceVisibility', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _check_private_link_service_visibility_by_resource_group_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/locations/{location}/checkPrivateLinkServiceVisibility'}  # type: ignore
    async def begin_check_private_link_service_visibility_by_resource_group(
        self,
        location: str,
        resource_group_name: str,
        parameters: "_models.CheckPrivateLinkServiceVisibilityRequest",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.PrivateLinkServiceVisibility"]:
        """Checks whether the subscription is visible to private link service in the specified resource
        group.
        :param location: The location of the domain name.
        :type location: str
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param parameters: The request body of CheckPrivateLinkService API call.
        :type parameters: ~azure.mgmt.network.v2020_07_01.models.CheckPrivateLinkServiceVisibilityRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PrivateLinkServiceVisibility or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_07_01.models.PrivateLinkServiceVisibility]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateLinkServiceVisibility"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: fire the initial POST; 'cls' passthrough keeps
            # the raw PipelineResponse for the poller.
            raw_result = await self._check_private_link_service_visibility_by_resource_group_initial(
                location=location,
                resource_group_name=resource_group_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response into the visibility model.
            deserialized = self._deserialize('PrivateLinkServiceVisibility', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # The service signals LRO completion via the 'location' header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_check_private_link_service_visibility_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/locations/{location}/checkPrivateLinkServiceVisibility'}  # type: ignore
    def list_auto_approved_private_link_services(
        self,
        location: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.AutoApprovedPrivateLinkServicesResult"]:
        """Returns all of the private link service ids that can be linked to a Private Endpoint with auto
        approved in this subscription in this region.
        :param location: The location of the domain name.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AutoApprovedPrivateLinkServicesResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_07_01.models.AutoApprovedPrivateLinkServicesResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AutoApprovedPrivateLinkServicesResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build a GET: the first page uses the templated URL; subsequent
            # pages use the server-provided next_link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_auto_approved_private_link_services.metadata['url']  # type: ignore
                path_format_arguments = {
                    'location': self._serialize.url("location", location, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page -> (next page link or None, iterator of items).
            deserialized = self._deserialize('AutoApprovedPrivateLinkServicesResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_auto_approved_private_link_services.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/autoApprovedPrivateLinkServices'}  # type: ignore
    def list_auto_approved_private_link_services_by_resource_group(
        self,
        location: str,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.AutoApprovedPrivateLinkServicesResult"]:
        """Returns all of the private link service ids that can be linked to a Private Endpoint with auto
        approved in this subscription in this region.
        :param location: The location of the domain name.
        :type location: str
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AutoApprovedPrivateLinkServicesResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_07_01.models.AutoApprovedPrivateLinkServicesResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AutoApprovedPrivateLinkServicesResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build a GET: the first page uses the templated URL; subsequent
            # pages use the server-provided next_link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_auto_approved_private_link_services_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    'location': self._serialize.url("location", location, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page -> (next page link or None, iterator of items).
            deserialized = self._deserialize('AutoApprovedPrivateLinkServicesResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_auto_approved_private_link_services_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/locations/{location}/autoApprovedPrivateLinkServices'}  # type: ignore
|
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
# Resolve the directory containing this script and make it the working
# directory so all relative paths below (manifest, LICENSE, assets, ...)
# are resolved against the module project root.
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
# Keys that must be present (and non-empty) in the module manifest.
# Fix: the original list contained 'copyright' twice; the duplicate caused
# the same key to be validated twice and is removed here.
required_module_keys = ['architectures', 'name', 'version', 'moduleid', 'description', 'copyright', 'license', 'platform', 'minsdk']
# Placeholder values shipped by the module template; validate_manifest()
# warns when the manifest still carries any of these defaults.
module_defaults = {
    'description': 'My module',
    'author': 'Your Name',
    'license': 'Specify your license',
    'copyright': 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
    """Return the Titanium SDK path from *config*, with '~' and $VARS expanded."""
    raw_path = config['TITANIUM_SDK']
    home_expanded = os.path.expanduser(raw_path)
    return os.path.expandvars(home_expanded)
def replace_vars(config, token):
    """Expand $(KEY) placeholders in *token* using values from *config*.
    Expansion stops at the first placeholder that is unterminated or whose
    key is not present in *config*; the remaining text is returned as-is.
    """
    idx = token.find('$(')
    while idx != -1:
        idx2 = token.find(')', idx + 2)
        if idx2 == -1: break  # unterminated placeholder
        key = token[idx + 2:idx2]
        # 'key in config' replaces dict.has_key(), which was removed in Python 3.
        if key not in config: break
        token = token.replace('$(%s)' % key, config[key])
        idx = token.find('$(')
    return token
def read_ti_xcconfig():
    """Parse cwd/titanium.xcconfig into a dict of settings.
    Lines starting with '//' are comments; each 'KEY = VALUE' pair is stored
    with $(VAR) references already substituted via replace_vars().
    """
    # 'with' guarantees the handle is closed (the original open().read() leaked it).
    with open(os.path.join(cwd, 'titanium.xcconfig')) as f:
        contents = f.read()
    config = {}
    for line in contents.splitlines(False):
        line = line.strip()
        if line[0:2] == '//': continue
        idx = line.find('=')
        if idx > 0:
            key = line[0:idx].strip()
            value = line[idx + 1:].strip()
            config[key] = replace_vars(config, value)
    return config
def generate_doc(config):
    """Render every file in the 'documentation' directory to HTML via markdown.
    Returns a list of {filename: html} dicts, or None when no documentation
    directory exists next to (or one level above) this script.
    """
    docdir = os.path.join(cwd, 'documentation')
    if not os.path.exists(docdir):
        docdir = os.path.join(cwd, '..', 'documentation')
    if not os.path.exists(docdir):
        # print() form is valid under both Python 2 and Python 3
        # (the original 'print "..."' statement is a syntax error on Python 3).
        print("Couldn't find documentation file at: %s" % docdir)
        return None
    # Prefer markdown2 when available; fall back to the markdown package.
    try:
        import markdown2 as markdown
    except ImportError:
        import markdown
    documentation = []
    for file in os.listdir(docdir):
        if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
            continue
        # 'with' closes the handle (the original open().read() leaked it).
        with open(os.path.join(docdir, file)) as f:
            md = f.read()
        html = markdown.markdown(md)
        documentation.append({file: html})
    return documentation
def compile_js(manifest,config):
	"""Compile the module's CommonJS JavaScript, splice the generated byte
	arrays into the ObjC assets router, and write metadata.json listing the
	detected exports."""
	js_file = os.path.join(cwd,'assets','com.thevirtualforge.s3filetransfermanager.js')
	if not os.path.exists(js_file):
		js_file = os.path.join(cwd,'..','assets','com.thevirtualforge.s3filetransfermanager.js')
	if not os.path.exists(js_file): return
	from compiler import Compiler
	try:
		import json
	except:
		import simplejson as json
	compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
	root_asset, module_assets = compiler.compile_module()
	# ObjC snippet spliced into the 'asset' section of the assets router;
	# %s carries the generated data declarations for the compiled root JS.
	root_asset_content = """
%s
	return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
	# ObjC snippet for the 'resolve_asset' section: looks up the requested
	# path in the generated map and returns nil when it is not bundled.
	module_asset_content = """
%s
	NSNumber *index = [map objectForKey:path];
	if (index == nil) {
		return nil;
	}
	return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
	from tools import splice_code
	assets_router = os.path.join(cwd,'Classes','ComThevirtualforgeS3filetransfermanagerModuleAssets.m')
	splice_code(assets_router, 'asset', root_asset_content)
	splice_code(assets_router, 'resolve_asset', module_asset_content)
	# Generate the exports after crawling all of the available JS source
	exports = open('metadata.json','w')
	json.dump({'exports':compiler.exports }, exports)
	exports.close()
def die(msg):
    # print the message and abort the build with a failure exit status
    print msg
    sys.exit(1)
def warn(msg):
    # non-fatal diagnostic; the build continues
    print "[WARN] %s" % msg
def validate_license():
    """Warn when the LICENSE file still contains the default placeholder
    text (module_license_default, defined earlier in this script)."""
    license_file = os.path.join(cwd, 'LICENSE')
    if not os.path.exists(license_file):
        license_file = os.path.join(cwd, '..', 'LICENSE')
    if os.path.exists(license_file):
        c = open(license_file).read()
        if c.find(module_license_default) != -1:
            warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
    """Read and validate the module manifest.

    Every key in required_module_keys must be present and non-empty;
    keys that still carry their module_defaults placeholder value only
    produce a warning.  Returns (manifest_dict, manifest_path).
    """
    path = os.path.join(cwd, 'manifest')
    # check existence *before* opening: the original opened the file
    # first, so this die() could never fire (open raised IOError)
    if not os.path.exists(path):
        die("missing %s" % path)
    f = open(path)
    manifest = {}
    for line in f.readlines():
        line = line.strip()
        if line[0:1] == '#':
            continue
        if line.find(':') < 0:
            continue
        # split on the first ':' only, so values may themselves contain
        # colons (e.g. URLs); a bare split(':') raised ValueError there
        key, value = line.split(':', 1)
        manifest[key.strip()] = value.strip()
    f.close()
    for key in required_module_keys:
        if not manifest.has_key(key):
            die("missing required manifest key '%s'" % key)
        if manifest[key].strip() == '':
            die("manifest key '%s' missing required value" % key)
        if module_defaults.has_key(key):
            defvalue = module_defaults[key]
            curvalue = manifest[key]
            if curvalue == defvalue:
                warn("please update the manifest key: '%s' to a non-default value" % key)
    return manifest, path
# housekeeping files and VCS directories excluded from the packaged zip
ignoreFiles = ['.DS_Store', '.gitignore', 'libTitanium.a', 'titanium.jar', 'README']
ignoreDirs = ['.DS_Store', '.svn', '.git', 'CVSROOT']
def zip_dir(zf, dir, basepath, ignore=[], includeJSFiles=False):
    """Recursively add the contents of `dir` to zipfile `zf`, with each
    entry's path rebased from `dir` onto `basepath`.

    Skips ignoreDirs, ignoreFiles, the per-call `ignore` list, .pyc
    files and (unless includeJSFiles) .js files.
    """
    for root, dirs, files in os.walk(dir):
        for name in ignoreDirs:
            if name in dirs:
                dirs.remove(name)  # don't visit ignored directories
        for file in files:
            # honor the per-call ignore list too -- it was previously
            # accepted by every caller but silently unused
            if file in ignoreFiles or file in ignore:
                continue
            e = os.path.splitext(file)
            if len(e) == 2 and e[1] == '.pyc':
                continue
            if not includeJSFiles and len(e) == 2 and e[1] == '.js':
                continue
            from_ = os.path.join(root, file)
            to_ = from_.replace(dir, basepath, 1)
            zf.write(from_, to_)
def glob_libfiles():
    """Return the paths of all Release-configuration static libraries
    produced under build/."""
    return [path for path in glob.glob('build/**/*.a')
            if path.find('Release-') != -1]
def build_module(manifest, config):
    """Build device and simulator static libraries with xcodebuild and
    merge them into one fat library, build/lib<moduleid>.a, via lipo."""
    from tools import ensure_dev_path
    ensure_dev_path()
    rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
    if rc != 0:
        die("xcodebuild failed")
    rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
    if rc != 0:
        die("xcodebuild failed")
    # build the merged library using lipo
    moduleid = manifest['moduleid']
    libpaths = ''
    for libfile in glob_libfiles():
        libpaths += '%s ' % libfile
    os.system("lipo %s -create -output build/lib%s.a" % (libpaths, moduleid))
def verify_build_arch(manifest, config):
    """Compare the compiled library's architectures (per `lipo -info`)
    against the manifest's 'architectures' entry.

    Warns when arm64 is missing; dies on any manifest/binary mismatch.
    """
    binaryname = 'lib%s.a' % manifest['moduleid']
    binarypath = os.path.join('build', binaryname)
    manifestarch = set(manifest['architectures'].split(' '))
    # pass an argument list instead of a shell-interpolated string: no
    # shell is needed, and paths containing spaces or metacharacters
    # are handled safely
    output = subprocess.check_output(['xcrun', 'lipo', '-info', binarypath])
    builtarch = set(output.split(':')[-1].strip().split(' '))
    if ('arm64' not in builtarch):
        warn('built module is missing 64-bit support.')
    if (manifestarch != builtarch):
        warn('there is discrepancy between the architectures specified in module manifest and compiled binary.')
        warn('architectures in manifest: %s' % ', '.join(manifestarch))
        warn('compiled binary architectures: %s' % ', '.join(builtarch))
        die('please update manifest to match module binary architectures.')
def package_module(manifest, mf, config):
    """Assemble the distributable <moduleid>-iphone-<version>.zip.

    Packs the manifest, fat library, rendered documentation, assets,
    example/platform dirs, LICENSE, module.xcconfig and metadata.json
    under modules/iphone/<moduleid>/<version>/.
    """
    name = manifest['name'].lower()
    moduleid = manifest['moduleid'].lower()
    version = manifest['version']
    modulezip = '%s-iphone-%s.zip' % (moduleid, version)
    # start from a clean archive
    if os.path.exists(modulezip): os.remove(modulezip)
    zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
    modulepath = 'modules/iphone/%s/%s' % (moduleid, version)
    zf.write(mf, '%s/manifest' % modulepath)
    libname = 'lib%s.a' % moduleid
    zf.write('build/%s' % libname, '%s/%s' % (modulepath, libname))
    docs = generate_doc(config)
    if docs != None:
        for doc in docs:
            for file, html in doc.iteritems():
                # *.md sources become *.html in the archive
                filename = string.replace(file, '.md', '.html')
                zf.writestr('%s/documentation/%s' % (modulepath, filename), html)
    # assets may live next to the script or one level up
    p = os.path.join(cwd, 'assets')
    if not os.path.exists(p):
        p = os.path.join(cwd, '..', 'assets')
    if os.path.exists(p):
        zip_dir(zf, p, '%s/%s' % (modulepath, 'assets'), ['README'])
    for dn in ('example', 'platform'):
        p = os.path.join(cwd, dn)
        if not os.path.exists(p):
            p = os.path.join(cwd, '..', dn)
        if os.path.exists(p):
            # include .js files here (True) -- examples ship with source
            zip_dir(zf, p, '%s/%s' % (modulepath, dn), ['README'], True)
    license_file = os.path.join(cwd, 'LICENSE')
    if not os.path.exists(license_file):
        license_file = os.path.join(cwd, '..', 'LICENSE')
    if os.path.exists(license_file):
        zf.write(license_file, '%s/LICENSE' % modulepath)
    zf.write('module.xcconfig', '%s/module.xcconfig' % modulepath)
    # exports metadata produced by compile_js(), if any
    exports_file = 'metadata.json'
    if os.path.exists(exports_file):
        zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
    zf.close()
if __name__ == '__main__':
    # build pipeline: validate -> compile JS -> build -> verify -> package
    manifest, mf = validate_manifest()
    validate_license()
    config = read_ti_xcconfig()
    # make the Titanium SDK's python helpers importable
    sdk = find_sdk(config)
    sys.path.insert(0, os.path.join(sdk, 'iphone'))
    sys.path.append(os.path.join(sdk, "common"))
    compile_js(manifest, config)
    build_module(manifest, config)
    verify_build_arch(manifest, config)
    package_module(manifest, mf, config)
    sys.exit(0)
|
|
"""Tools and arithmetics for monomials of distributed polynomials. """
from sympy.core.mul import Mul
from sympy.core.singleton import S
from sympy.core.basic import C
from sympy.polys.polyerrors import ExactQuotientFailed
from sympy.utilities import all, any, cythonized
from sympy.core.compatibility import cmp
def monomials(variables, degree):
    r"""
    Generate a set of monomials of the given total degree or less.

    Given a set of variables ``V`` and a total degree ``N`` generate
    a set of monomials of degree at most ``N``. The total number of
    monomials is huge and is given by the following formula:

    .. math::

        \frac{(\#V + N)!}{\#V! N!}

    For example if we would like to generate a dense polynomial of
    a total degree $N = 50$ in 5 variables, assuming that exponents
    and all of coefficients are 32-bit long and stored in an array we
    would need almost 80 GiB of memory! Fortunately most polynomials,
    that we will encounter, are sparse.

    **Examples**

    Consider monomials in variables ``x`` and ``y``::

        >>> from sympy import monomials
        >>> from sympy.abc import x, y

        >>> sorted(monomials([x, y], 2))
        [1, x, y, x**2, y**2, x*y]

        >>> sorted(monomials([x, y], 3))
        [1, x, y, x**2, x**3, y**2, y**3, x*y, x*y**2, x**2*y]

    """
    if not variables:
        # no variables left: the only monomial is the constant 1
        return set([S.One])
    else:
        x, tail = variables[0], variables[1:]
        # monomials not involving x at all
        monoms = monomials(tail, degree)
        # for each power x**i, recurse on the remaining degree budget
        for i in range(1, degree+1):
            monoms |= set([ x**i * m for m in monomials(tail, degree-i) ])
        return monoms
def monomial_count(V, N):
    r"""
    Computes the number of monomials.

    The number of monomials is given by the following formula:

    .. math::

        \frac{(\#V + N)!}{\#V! N!}

    where ``N`` is a total degree and ``V`` is a set of variables.

    **Examples**

    >>> from sympy import monomials, monomial_count
    >>> from sympy.abc import x, y

    >>> monomial_count(2, 2)
    6

    >>> M = monomials([x, y], 2)

    >>> sorted(M)
    [1, x, y, x**2, y**2, x*y]
    >>> len(M)
    6

    """
    # binomial(V + N, N) expressed with sympy factorials (exact result)
    return C.factorial(V + N) / C.factorial(V) / C.factorial(N)
def monomial_lex_key(monom):
    """Key function for sorting monomials in lexicographic order. """
    # exponent tuples already compare lexicographically
    return monom
def monomial_grlex_key(monom):
    """Sort key for graded lexicographic ('grlex') monomial order:
    compare total degree first, break ties lexicographically."""
    total_degree = sum(monom)
    return (total_degree, monom)
def monomial_grevlex_key(monom):
    """Sort key for reversed graded lexicographic ('grevlex') monomial
    order: total degree first, ties broken on the reversed exponents."""
    return (sum(monom), tuple(monom[::-1]))
# ordering name -> key function, consumed by monomial_key() below
_monomial_key = {
    'lex'     : monomial_lex_key,
    'grlex'   : monomial_grlex_key,
    'grevlex' : monomial_grevlex_key,
}
def monomial_key(order=None):
    """
    Return a function defining admissible order on monomials.

    The returned function is meant to be used as the ``key`` argument
    of the built-in :func:`sorted`, on monomials of equal length.

    Supported named orderings:

    1. lex - lexicographic order (default)
    2. grlex - graded lexicographic order
    3. grevlex - reversed graded lexicographic order

    A non-string argument with a ``__call__`` attribute is passed
    through unchanged, on the assumption that it already defines an
    admissible monomial order.
    """
    if order is None:
        return _monomial_key['lex']
    if isinstance(order, str):
        if order not in _monomial_key:
            raise ValueError("supported monomial orderings are 'lex', 'grlex' and 'grevlex', got %r" % order)
        return _monomial_key[order]
    if hasattr(order, '__call__'):
        return order
    raise ValueError("monomial ordering specification must be a string or a callable, got %s" % order)
def monomial_lex_cmp(a, b):
    # plain lexicographic comparison of exponent tuples (py2 cmp style)
    return cmp(a, b)

def monomial_grlex_cmp(a, b):
    # total degree first, lexicographic tie-break
    return cmp(sum(a), sum(b)) or cmp(a, b)

def monomial_grevlex_cmp(a, b):
    # total degree first; ties broken on *reversed* tuples with the
    # operands swapped, giving reversed lexicographic order
    return cmp(sum(a), sum(b)) or cmp(tuple(reversed(b)), tuple(reversed(a)))

# legacy cmp-style comparators, keyed by ordering name
_monomial_order = {
    'lex'     : monomial_lex_cmp,
    'grlex'   : monomial_grlex_cmp,
    'grevlex' : monomial_grevlex_cmp,
}
def monomial_cmp(order):
    """
    Return a legacy cmp-style comparator for the named monomial order.

    Supported orderings:

    1. lex - lexicographic order
    2. grlex - graded lexicographic order
    3. grevlex - reversed graded lexicographic order
    """
    if order in _monomial_order:
        return _monomial_order[order]
    raise ValueError("expected valid monomial order, got %s" % order)
@cythonized("a,b")
def monomial_mul(A, B):
"""
Multiplication of tuples representing monomials.
Lets multiply `x**3*y**4*z` with `x*y**2`::
>>> from sympy.polys.monomialtools import monomial_mul
>>> monomial_mul((3, 4, 1), (1, 2, 0))
(4, 6, 1)
which gives `x**4*y**5*z`.
"""
return tuple([ a + b for a, b in zip(A, B) ])
@cythonized("a,b,c")
def monomial_div(A, B):
"""
Division of tuples representing monomials.
Lets divide `x**3*y**4*z` by `x*y**2`::
>>> from sympy.polys.monomialtools import monomial_div
>>> monomial_div((3, 4, 1), (1, 2, 0))
(2, 2, 1)
which gives `x**2*y**2*z`. However::
>>> monomial_div((3, 4, 1), (1, 2, 2)) is None
True
`x*y**2*z**2` does not divide `x**3*y**4*z`.
"""
C = [ a - b for a, b in zip(A, B) ]
if all([ c >= 0 for c in C ]):
return tuple(C)
else:
return None
@cythonized("a,b")
def monomial_gcd(A, B):
"""
Greatest common divisor of tuples representing monomials.
Lets compute GCD of `x**3*y**4*z` and `x*y**2`::
>>> from sympy.polys.monomialtools import monomial_gcd
>>> monomial_gcd((3, 4, 1), (1, 2, 0))
(1, 2, 0)
which gives `x*y**2`.
"""
return tuple([ min(a, b) for a, b in zip(A, B) ])
@cythonized("a,b")
def monomial_lcm(A, B):
"""
Least common multiple of tuples representing monomials.
Lets compute LCM of `x**3*y**4*z` and `x*y**2`::
>>> from sympy.polys.monomialtools import monomial_lcm
>>> monomial_lcm((3, 4, 1), (1, 2, 0))
(3, 4, 1)
which gives `x**3*y**4*z`.
"""
return tuple([ max(a, b) for a, b in zip(A, B) ])
@cythonized("i,n")
def monomial_max(*monoms):
"""
Returns maximal degree for each variable in a set of monomials.
Consider monomials `x**3*y**4*z**5`, `y**5*z` and `x**6*y**3*z**9`.
We wish to find out what is the maximal degree for each of `x`, `y`
and `z` variables::
>>> from sympy.polys.monomialtools import monomial_max
>>> monomial_max((3,4,5), (0,5,1), (6,3,9))
(6, 5, 9)
"""
M = list(monoms[0])
for N in monoms[1:]:
for i, n in enumerate(N):
M[i] = max(M[i], n)
return tuple(M)
@cythonized("i,n")
def monomial_min(*monoms):
"""
Returns minimal degree for each variable in a set of monomials.
Consider monomials `x**3*y**4*z**5`, `y**5*z` and `x**6*y**3*z**9`.
We wish to find out what is the minimal degree for each of `x`, `y`
and `z` variables::
>>> from sympy.polys.monomialtools import monomial_min
>>> monomial_min((3,4,5), (0,5,1), (6,3,9))
(0, 3, 1)
"""
M = list(monoms[0])
for N in monoms[1:]:
for i, n in enumerate(N):
M[i] = min(M[i], n)
return tuple(M)
class Monomial(object):
    """Class representing a monomial, i.e. a product of powers. """

    # exponents only; the generators are supplied later to as_expr()
    __slots__ = ['data']

    def __init__(self, *data):
        # normalize every exponent to a plain int
        self.data = tuple(map(int, data))

    def __hash__(self):
        return hash((self.__class__.__name__, self.data))

    def __repr__(self):
        return "Monomial(%s)" % ", ".join(map(str, self.data))

    def as_expr(self, *gens):
        """Convert a monomial instance to a SymPy expression. """
        return Mul(*[ gen**exp for gen, exp in zip(gens, self.data) ])

    def __eq__(self, other):
        # only equal to other Monomials with identical exponent tuples
        if isinstance(other, Monomial):
            return self.data == other.data
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __mul__(self, other):
        if isinstance(other, Monomial):
            return Monomial(*monomial_mul(self.data, other.data))
        else:
            raise TypeError("an instance of Monomial class expected, got %s" % other)

    def __pow__(self, other):
        n = int(other)

        if not n:
            # zeroth power: the unit monomial of the same length
            return Monomial(*((0,)*len(self.data)))
        elif n > 0:
            # repeated multiplication (n - 1 multiplications)
            data = self.data

            for i in xrange(1, n):
                data = monomial_mul(data, self.data)

            return Monomial(*data)
        else:
            raise ValueError("a non-negative integer expected, got %s" % other)

    def __div__(self, other):
        if isinstance(other, Monomial):
            result = monomial_div(self.data, other.data)

            # monomial_div returns None when `other` does not divide us
            if result is not None:
                return Monomial(*result)
            else:
                raise ExactQuotientFailed(self, other)
        else:
            raise TypeError("an instance of Monomial class expected, got %s" % other)

    # same semantics under // and (py3-style) true division
    __floordiv__ = __truediv__ = __div__

    def gcd(self, other):
        """Greatest common divisor of monomials. """
        if isinstance(other, Monomial):
            return Monomial(*monomial_gcd(self.data, other.data))
        else:
            raise TypeError("an instance of Monomial class expected, got %s" % other)

    def lcm(self, other):
        """Least common multiple of monomials. """
        if isinstance(other, Monomial):
            return Monomial(*monomial_lcm(self.data, other.data))
        else:
            raise TypeError("an instance of Monomial class expected, got %s" % other)

    @classmethod
    def max(cls, *monomials):
        """Returns maximal degree for each variable in a set of monomials. """
        return Monomial(*monomial_max(*[ monomial.data for monomial in monomials ]))

    @classmethod
    def min(cls, *monomials):
        """Returns minimal degree for each variable in a set of monomials. """
        return Monomial(*monomial_min(*[ monomial.data for monomial in monomials ]))
|
|
from tests.markup._util import desired_output
import pytest
def simple_schema():
    # two-field form named "test"; only 'valued' receives a value
    from flatland import Form, String

    class SmallForm(Form):
        name = "test"
        valued = String
        empty = String

    return SmallForm({u'valued': u'val'})
### value
@pytest.fixture
def gen():
    # placeholder fixture: every test requesting it is skipped until the
    # suite is ported to the current markup generator API
    pytest.skip('FIXME: Test not converted yet')
# NOTE: in this suite the docstrings ARE the test payloads (expected
# markup / genshi templates) -- they must not be edited.
@desired_output('xhtml', simple_schema)
def value_default():
    """<input name="test_valued" value="val" />"""

@value_default.genshi
def test_value_default_genshi():
    """<input form:bind="form.valued"/>"""

@value_default.markup
def test_value_default_markup(gen, el):
    return gen.input(el['valued'])
@desired_output('xhtml', simple_schema)
def value_disabled():
    """<input name="test_valued" />"""

# auto-value can be switched off three ways: a scoped <form:with> /
# gen.begin()..gen.end(), a stateful <form:set> / gen.set(), or a
# per-element override.  Docstrings are test payloads -- do not edit.
@value_disabled.genshi
def test_with_value_disabled_genshi():
    """
    <form:with auto-value="off">
    <input form:bind="form.valued"/>
    </form:with>
    """

@value_disabled.markup
def test_with_value_disabled_markup(gen, el):
    gen.begin(auto_value=False)
    output = gen.input(el['valued'])
    gen.end()
    return output

@value_disabled.genshi
def test_set_value_disabled_genshi():
    """
    <form:set auto-value="off"/>
    <input form:bind="form.valued"/>
    """

@value_disabled.markup
def test_set_value_disabled_markup(gen, el):
    gen.set(auto_value=False)
    output = gen.input(el['valued'])
    return output

@value_disabled.genshi
def test_element_value_disabled_genshi():
    """<input form:bind="form.valued" form:auto-value="off"/>"""

@value_disabled.markup
def test_element_value_disabled_markup(gen, el):
    return gen.input(el['valued'], auto_value=False)

@value_disabled.genshi
def test_element_value_auto_genshi():
    """
    <form:with auto-value="no">
    <input form:bind="form.valued" form:auto-value="auto"/>
    </form:with>
    """

@value_disabled.markup
def test_element_value_auto_markup(gen, el):
    gen.begin(auto_value=False)
    output = gen.input(el['valued'], auto_value="auto")
    gen.end()
    return output
### name
# auto-name is on by default: the form element gets name="test".
# Docstrings are test payloads -- do not edit.
@desired_output('xhtml', simple_schema)
def name_default():
    """<form name="test"></form>"""

@name_default.genshi
def test_name_default_genshi():
    """<form form:bind="form"/>"""

@name_default.markup
def test_name_default_markup(gen, el):
    return gen.form(el)
@desired_output('xhtml', simple_schema)
def name_disabled():
    """<form></form>"""

# same three disabling mechanisms as the auto-value cases above.
# Docstrings are test payloads -- do not edit.
@name_disabled.genshi
def test_with_name_disabled_genshi():
    """
    <form:with auto-name="off">
    <form form:bind="form"/>
    </form:with>
    """

@name_disabled.markup
def test_with_name_disabled_markup(gen, el):
    gen.begin(auto_name=False)
    output = gen.form(el)
    gen.end()
    return output

@name_disabled.genshi
def test_set_name_disabled_genshi():
    """
    <form:set auto-name="off"/>
    <form form:bind="form"/>
    """

@name_disabled.markup
def test_set_name_disabled_markup(gen, el):
    gen.set(auto_name=False)
    output = gen.form(el)
    return output

@name_disabled.genshi
def test_element_name_disabled_genshi():
    """<form form:bind="form" form:auto-name="off"/>"""

@name_disabled.markup
def test_element_name_disabled_markup(gen, el):
    return gen.form(el, auto_name=False)

@name_disabled.genshi
def test_element_name_auto_genshi():
    """
    <form:with auto-name="no">
    <form form:bind="form" form:auto-name="auto"/>
    </form:with>
    """

@name_disabled.markup
def test_element_name_auto_markup(gen, el):
    gen.begin(auto_name=False)
    output = gen.form(el, auto_name="auto")
    gen.end()
    return output
### domid
# auto-domid is OFF by default: no id attribute is generated.
# Docstrings are test payloads -- do not edit.
@desired_output('xhtml', simple_schema)
def domid_default():
    """<select name="test_valued"></select>"""

@domid_default.genshi
def test_domid_default_genshi():
    """<select form:bind="form.valued"/>"""

@domid_default.markup
def test_domid_default_markup(gen, el):
    return gen.select(el['valued'])
@desired_output('xhtml', simple_schema)
def domid_enabled():
    """<select name="test_valued" id="-test_valued-"></select>"""

# enabling auto-domid (with a custom domid-format) via <form:with>,
# <form:set>, or a per-element flag.  Docstrings are test payloads.
@domid_enabled.genshi
def test_with_domid_enabled_genshi():
    """
    <form:with auto-domid="on" domid-format="-%s-">
    <select form:bind="form.valued"/>
    </form:with>
    """

@domid_enabled.markup
def test_with_domid_enabled_markup(gen, el):
    gen.begin(auto_domid=True, domid_format="-%s-")
    output = gen.select(el['valued'])
    gen.end()
    return output

@domid_enabled.genshi
def test_set_domid_enabled_genshi():
    """
    <form:set auto-domid="on" domid-format="-%s-" />
    <select form:bind="form.valued"/>
    """

@domid_enabled.markup
def test_set_domid_enabled_markup(gen, el):
    gen.set(auto_domid=True, domid_format="-%s-")
    return gen.select(el['valued'])

@domid_enabled.genshi
def test_element_domid_enabled_genshi():
    """
    <form:set domid-format="-%s-" />
    <select form:bind="form.valued" form:auto-domid="on"/>
    """

@domid_enabled.markup
def test_element_domid_enabled_markup(gen, el):
    gen.set(domid_format="-%s-")
    return gen.select(el['valued'], auto_domid=True)

@domid_enabled.genshi
def test_element_domid_auto_genshi():
    """
    <form:with auto-domid="on" domid-format="-%s-">
    <select form:bind="form.valued" form:auto-domid="auto"/>
    </form:with>
    """

@domid_enabled.markup
def test_element_domid_auto_markup(gen, el):
    gen.begin(auto_domid=True, domid_format="-%s-")
    output = gen.select(el['valued'], auto_domid="auto")
    gen.end()
    return output
### for
### tabindex
### filter
def filter1(tagname, attributes, contents, context, bind):
    # example filter: mark the element required and append ' *' to the
    # rendered contents
    attributes['class'] = 'required'
    contents += ' *'
    return contents

# filter1 is passed in as the 'funky_filter' keyword so both the genshi
# template and the markup API can reference it by name.
@desired_output('xhtml', simple_schema, funky_filter=filter1)
def filter_enabled():
    """
    <label class="required">field2 *</label>
    """

@filter_enabled.genshi
def test_with_filter_enabled_genshi():
    """
    <form:with auto-filter="on" filters="[funky_filter]">
    <label form:bind="form.valued">field2</label>
    </form:with>
    """

@filter_enabled.markup
def test_with_filter_enabled_markup(gen, el, funky_filter):
    gen.begin(auto_filter=True, filters=[funky_filter])
    output = gen.label(el['valued'], contents='field2')
    gen.end()
    return output
|
|
# tempfile.py unit tests.
import tempfile
import errno
import io
import os
import signal
import shutil
import sys
import re
import warnings
import contextlib
import unittest
from test import test_support as support
# mktemp() is deprecated; silence the RuntimeWarning it raises when the
# tests below use it from this module
warnings.filterwarnings("ignore",
                        category=RuntimeWarning,
                        message="mktemp", module=__name__)

# feature detection: os.stat, text-mode open flags and os.spawnl are
# not available on every platform
if hasattr(os, 'stat'):
    import stat
    has_stat = 1
else:
    has_stat = 0

has_textmode = (tempfile._text_openflags != tempfile._bin_openflags)
has_spawnl = hasattr(os, 'spawnl')

# TEST_FILES may need to be tweaked for systems depending on the maximum
# number of files that can be opened at one time (see ulimit -n)
if sys.platform in ('openbsd3', 'openbsd4'):
    TEST_FILES = 48
else:
    TEST_FILES = 100

# This is organized as one test for each chunk of code in tempfile.py,
# in order of their appearance in the file.  Testing which requires
# threads is not done here.
# Common functionality.
class TC(unittest.TestCase):
    """Base class for tempfile tests: shared helpers for checking
    generated temporary names."""

    # the random portion of a generated name: exactly six of [a-zA-Z0-9_-]
    str_check = re.compile(r"[a-zA-Z0-9_-]{6}$")

    def failOnException(self, what, ei=None):
        # fail the current test, reporting the (current) exception info
        if ei is None:
            ei = sys.exc_info()
        self.fail("%s raised %s: %s" % (what, ei[0], ei[1]))

    def nameCheck(self, name, dir, pre, suf):
        # verify directory, prefix, suffix and the random middle portion
        (ndir, nbase) = os.path.split(name)
        npre = nbase[:len(pre)]
        nsuf = nbase[len(nbase)-len(suf):]

        # check for equality of the absolute paths!
        self.assertEqual(os.path.abspath(ndir), os.path.abspath(dir),
                         "file '%s' not in directory '%s'" % (name, dir))
        self.assertEqual(npre, pre,
                         "file '%s' does not begin with '%s'" % (nbase, pre))
        self.assertEqual(nsuf, suf,
                         "file '%s' does not end with '%s'" % (nbase, suf))

        nbase = nbase[len(pre):len(nbase)-len(suf)]
        self.assertTrue(self.str_check.match(nbase),
                        "random string '%s' does not match /^[a-zA-Z0-9_-]{6}$/"
                        % nbase)

# registry of test case classes populated throughout this file
test_classes = []
class test_exports(TC):
    def test_exports(self):
        # There are no surprising symbols in the tempfile module
        dict = tempfile.__dict__

        expected = {
            "NamedTemporaryFile" : 1,
            "TemporaryFile" : 1,
            "mkstemp" : 1,
            "mkdtemp" : 1,
            "mktemp" : 1,
            "TMP_MAX" : 1,
            "gettempprefix" : 1,
            "gettempdir" : 1,
            "tempdir" : 1,
            "template" : 1,
            "SpooledTemporaryFile" : 1
        }

        # anything public (no leading underscore) and not expected is a
        # namespace regression
        unexp = []
        for key in dict:
            if key[0] != '_' and key not in expected:
                unexp.append(key)
        self.assertTrue(len(unexp) == 0,
                        "unexpected keys: %s" % unexp)

test_classes.append(test_exports)
class test__RandomNameSequence(TC):
    """Test the internal iterator object _RandomNameSequence."""

    def setUp(self):
        self.r = tempfile._RandomNameSequence()

    def test_get_six_char_str(self):
        # _RandomNameSequence returns a six-character string
        s = self.r.next()
        self.nameCheck(s, '', '', '')

    def test_many(self):
        # _RandomNameSequence returns no duplicate strings (stochastic)
        dict = {}
        r = self.r
        for i in xrange(TEST_FILES):
            s = r.next()
            self.nameCheck(s, '', '', '')
            self.assertNotIn(s, dict)
            dict[s] = 1

    def test_supports_iter(self):
        # _RandomNameSequence supports the iterator protocol
        i = 0
        r = self.r
        try:
            # infinite iterator: take 20 values and stop
            for s in r:
                i += 1
                if i == 20:
                    break
        except:
            self.failOnException("iteration")

    @unittest.skipUnless(hasattr(os, 'fork'),
        "os.fork is required for this test")
    def test_process_awareness(self):
        # ensure that the random source differs between
        # child and parent.
        read_fd, write_fd = os.pipe()
        pid = None
        try:
            pid = os.fork()
            if not pid:
                # child: send one generated name back over the pipe
                os.close(read_fd)
                os.write(write_fd, next(self.r).encode("ascii"))
                os.close(write_fd)
                # bypass the normal exit handlers- leave those to
                # the parent.
                os._exit(0)
            parent_value = next(self.r)
            child_value = os.read(read_fd, len(parent_value)).decode("ascii")
        finally:
            if pid:
                # best effort to ensure the process can't bleed out
                # via any bugs above
                try:
                    os.kill(pid, signal.SIGKILL)
                except EnvironmentError:
                    pass
            os.close(read_fd)
            os.close(write_fd)
        self.assertNotEqual(child_value, parent_value)

test_classes.append(test__RandomNameSequence)
class test__candidate_tempdir_list(TC):
    """Test the internal function _candidate_tempdir_list."""

    def test_nonempty_list(self):
        # _candidate_tempdir_list returns a nonempty list of strings
        cand = tempfile._candidate_tempdir_list()

        self.assertFalse(len(cand) == 0)
        for c in cand:
            self.assertIsInstance(c, basestring)

    def test_wanted_dirs(self):
        # _candidate_tempdir_list contains the expected directories

        # Make sure the interesting environment variables are all set.
        with support.EnvironmentVarGuard() as env:
            for envname in 'TMPDIR', 'TEMP', 'TMP':
                dirname = os.getenv(envname)
                if not dirname:
                    env[envname] = os.path.abspath(envname)

            cand = tempfile._candidate_tempdir_list()

            for envname in 'TMPDIR', 'TEMP', 'TMP':
                dirname = os.getenv(envname)
                if not dirname: raise ValueError
                self.assertIn(dirname, cand)

            try:
                dirname = os.getcwd()
            except (AttributeError, os.error):
                dirname = os.curdir

            self.assertIn(dirname, cand)

        # Not practical to try to verify the presence of OS-specific
        # paths in this list.

test_classes.append(test__candidate_tempdir_list)
# We test _get_default_tempdir some more by testing gettempdir.

class TestGetDefaultTempdir(TC):
    """Test _get_default_tempdir()."""

    def test_no_files_left_behind(self):
        # use a private empty directory
        our_temp_directory = tempfile.mkdtemp()
        try:
            # force _get_default_tempdir() to consider our empty directory
            def our_candidate_list():
                return [our_temp_directory]

            with support.swap_attr(tempfile, "_candidate_tempdir_list",
                                   our_candidate_list):
                # verify our directory is empty after _get_default_tempdir()
                tempfile._get_default_tempdir()
                self.assertEqual(os.listdir(our_temp_directory), [])

                def raise_OSError(*args, **kwargs):
                    raise OSError(-1)

                with support.swap_attr(io, "open", raise_OSError):
                    # test again with failing io.open()
                    with self.assertRaises(IOError) as cm:
                        tempfile._get_default_tempdir()
                    self.assertEqual(cm.exception.errno, errno.ENOENT)
                    self.assertEqual(os.listdir(our_temp_directory), [])

                # keep a reference to the real open for the wrapper below
                open = io.open
                def bad_writer(*args, **kwargs):
                    fp = open(*args, **kwargs)
                    fp.write = raise_OSError
                    return fp

                with support.swap_attr(io, "open", bad_writer):
                    # test again with failing write()
                    with self.assertRaises(IOError) as cm:
                        tempfile._get_default_tempdir()
                    self.assertEqual(cm.exception.errno, errno.ENOENT)
                    self.assertEqual(os.listdir(our_temp_directory), [])
        finally:
            shutil.rmtree(our_temp_directory)

test_classes.append(TestGetDefaultTempdir)
class test__get_candidate_names(TC):
    """Test the internal function _get_candidate_names."""

    def test_retval(self):
        # _get_candidate_names returns a _RandomNameSequence object
        obj = tempfile._get_candidate_names()
        self.assertIsInstance(obj, tempfile._RandomNameSequence)

    def test_same_thing(self):
        # _get_candidate_names always returns the same object
        a = tempfile._get_candidate_names()
        b = tempfile._get_candidate_names()

        self.assertTrue(a is b)

test_classes.append(test__get_candidate_names)
@contextlib.contextmanager
def _inside_empty_temp_dir():
    # run the with-body with tempfile.tempdir pointing at a fresh,
    # private, empty directory; always clean it up afterwards
    dir = tempfile.mkdtemp()
    try:
        with support.swap_attr(tempfile, 'tempdir', dir):
            yield
    finally:
        support.rmtree(dir)
def _mock_candidate_names(*names):
    # patch tempfile._get_candidate_names to yield exactly `names`,
    # in order (a finite iterator instead of the random sequence)
    return support.swap_attr(tempfile,
                             '_get_candidate_names',
                             lambda: iter(names))
class TestBadTempdir:
    # mixin: creation must fail cleanly (and leave no files) when the
    # temp directory is unusable; concrete subclasses define make_temp()
    # and also inherit from TC

    def test_read_only_directory(self):
        with _inside_empty_temp_dir():
            oldmode = mode = os.stat(tempfile.tempdir).st_mode
            mode &= ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)
            os.chmod(tempfile.tempdir, mode)
            try:
                if os.access(tempfile.tempdir, os.W_OK):
                    # e.g. running as root: chmod had no effect
                    self.skipTest("can't set the directory read-only")
                with self.assertRaises(OSError) as cm:
                    self.make_temp()
                self.assertIn(cm.exception.errno, (errno.EPERM, errno.EACCES))
                self.assertEqual(os.listdir(tempfile.tempdir), [])
            finally:
                os.chmod(tempfile.tempdir, oldmode)

    def test_nonexisting_directory(self):
        with _inside_empty_temp_dir():
            tempdir = os.path.join(tempfile.tempdir, 'nonexistent')
            with support.swap_attr(tempfile, 'tempdir', tempdir):
                with self.assertRaises(OSError) as cm:
                    self.make_temp()
                self.assertEqual(cm.exception.errno, errno.ENOENT)

    def test_non_directory(self):
        with _inside_empty_temp_dir():
            # point tempdir at a regular file instead of a directory
            tempdir = os.path.join(tempfile.tempdir, 'file')
            open(tempdir, 'wb').close()
            with support.swap_attr(tempfile, 'tempdir', tempdir):
                with self.assertRaises(OSError) as cm:
                    self.make_temp()
                self.assertIn(cm.exception.errno, (errno.ENOTDIR, errno.ENOENT))
class test__mkstemp_inner(TestBadTempdir, TC):
    """Test the internal function _mkstemp_inner."""

    class mkstemped:
        # helper wrapper: creates a temp file via _mkstemp_inner and
        # closes/unlinks it when garbage collected
        _bflags = tempfile._bin_openflags
        _tflags = tempfile._text_openflags
        _close = os.close
        _unlink = os.unlink

        def __init__(self, dir, pre, suf, bin):
            if bin: flags = self._bflags
            else:   flags = self._tflags

            (self.fd, self.name) = tempfile._mkstemp_inner(dir, pre, suf, flags)

        def write(self, str):
            os.write(self.fd, str)

        def __del__(self):
            self._close(self.fd)
            self._unlink(self.name)

    def do_create(self, dir=None, pre="", suf="", bin=1):
        # create a temp file and verify its name components
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            file = self.mkstemped(dir, pre, suf, bin)
        except:
            self.failOnException("_mkstemp_inner")

        self.nameCheck(file.name, dir, pre, suf)
        return file

    def test_basic(self):
        # _mkstemp_inner can create files
        self.do_create().write("blat")
        self.do_create(pre="a").write("blat")
        self.do_create(suf="b").write("blat")
        self.do_create(pre="a", suf="b").write("blat")
        self.do_create(pre="aa", suf=".txt").write("blat")

    def test_basic_many(self):
        # _mkstemp_inner can create many files (stochastic)
        # keep every mkstemped alive so the names can't be reused
        extant = range(TEST_FILES)
        for i in extant:
            extant[i] = self.do_create(pre="aa")

    def test_choose_directory(self):
        # _mkstemp_inner can create files in a user-selected directory
        dir = tempfile.mkdtemp()
        try:
            self.do_create(dir=dir).write("blat")
        finally:
            os.rmdir(dir)

    @unittest.skipUnless(has_stat, 'os.stat not available')
    def test_file_mode(self):
        # _mkstemp_inner creates files with the proper mode
        file = self.do_create()
        mode = stat.S_IMODE(os.stat(file.name).st_mode)
        expected = 0600
        if sys.platform in ('win32', 'os2emx'):
            # There's no distinction among 'user', 'group' and 'world';
            # replicate the 'user' bits.
            user = expected >> 6
            expected = user * (1 + 8 + 64)
        self.assertEqual(mode, expected)

    @unittest.skipUnless(has_spawnl, 'os.spawnl not available')
    def test_noinherit(self):
        # _mkstemp_inner file handles are not inherited by child processes
        if support.verbose:
            v="v"
        else:
            v="q"

        file = self.do_create()
        fd = "%d" % file.fd

        try:
            me = __file__
        except NameError:
            me = sys.argv[0]

        # We have to exec something, so that FD_CLOEXEC will take
        # effect.  The core of this test is therefore in
        # tf_inherit_check.py, which see.
        tester = os.path.join(os.path.dirname(os.path.abspath(me)),
                              "tf_inherit_check.py")

        # On Windows a spawn* /path/ with embedded spaces shouldn't be quoted,
        # but an arg with embedded spaces should be decorated with double
        # quotes on each end
        if sys.platform in ('win32',):
            decorated = '"%s"' % sys.executable
            tester = '"%s"' % tester
        else:
            decorated = sys.executable

        retval = os.spawnl(os.P_WAIT, sys.executable, decorated, tester, v, fd)
        self.assertFalse(retval < 0,
                         "child process caught fatal signal %d" % -retval)
        self.assertFalse(retval > 0, "child process reports failure %d"%retval)

    @unittest.skipUnless(has_textmode, "text mode not available")
    def test_textmode(self):
        # _mkstemp_inner can create files in text mode
        self.do_create(bin=0).write("blat\n")
        # XXX should test that the file really is a text file

    def make_temp(self):
        # entry point used by the TestBadTempdir mixin
        return tempfile._mkstemp_inner(tempfile.gettempdir(),
                                       tempfile.template,
                                       '',
                                       tempfile._bin_openflags)

    def test_collision_with_existing_file(self):
        # _mkstemp_inner tries another name when a file with
        # the chosen name already exists
        with _inside_empty_temp_dir(), \
             _mock_candidate_names('aaa', 'aaa', 'bbb'):
            (fd1, name1) = self.make_temp()
            os.close(fd1)
            self.assertTrue(name1.endswith('aaa'))

            (fd2, name2) = self.make_temp()
            os.close(fd2)
            self.assertTrue(name2.endswith('bbb'))

    def test_collision_with_existing_directory(self):
        # _mkstemp_inner tries another name when a directory with
        # the chosen name already exists
        with _inside_empty_temp_dir(), \
             _mock_candidate_names('aaa', 'aaa', 'bbb'):
            dir = tempfile.mkdtemp()
            self.assertTrue(dir.endswith('aaa'))

            (fd, name) = self.make_temp()
            os.close(fd)
            self.assertTrue(name.endswith('bbb'))

test_classes.append(test__mkstemp_inner)
class test_gettempprefix(TC):
    """Test gettempprefix()."""

    def test_sane_template(self):
        # gettempprefix returns a nonempty prefix string
        p = tempfile.gettempprefix()

        self.assertIsInstance(p, basestring)
        self.assertTrue(len(p) > 0)

    def test_usable_template(self):
        # gettempprefix returns a usable prefix string

        # Create a temp directory, avoiding use of the prefix.
        # Then attempt to create a file whose name is
        # prefix + 'xxxxxx.xxx' in that directory.
        p = tempfile.gettempprefix() + "xxxxxx.xxx"
        d = tempfile.mkdtemp(prefix="")
        try:
            p = os.path.join(d, p)
            try:
                fd = os.open(p, os.O_RDWR | os.O_CREAT)
            except:
                self.failOnException("os.open")
            os.close(fd)
            os.unlink(p)
        finally:
            os.rmdir(d)

test_classes.append(test_gettempprefix)
class test_gettempdir(TC):
    """Test gettempdir()."""
    def test_directory_exists(self):
        # gettempdir returns a directory which exists
        dir = tempfile.gettempdir()
        self.assertTrue(os.path.isabs(dir) or dir == os.curdir,
                        "%s is not an absolute path" % dir)
        self.assertTrue(os.path.isdir(dir),
                        "%s is not a directory" % dir)
    def test_directory_writable(self):
        # gettempdir returns a directory writable by the user
        # sneaky: just instantiate a NamedTemporaryFile, which
        # defaults to writing into the directory returned by
        # gettempdir.
        try:
            file = tempfile.NamedTemporaryFile()
            file.write("blat")
            file.close()
        except:
            self.failOnException("create file in %s" % tempfile.gettempdir())
    def test_same_thing(self):
        # gettempdir always returns the same object
        # (identity check, not equality: the result is cached)
        a = tempfile.gettempdir()
        b = tempfile.gettempdir()
        self.assertTrue(a is b)
test_classes.append(test_gettempdir)
class test_mkstemp(TC):
    """Test mkstemp()."""
    def do_create(self, dir=None, pre="", suf=""):
        # Create one mkstemp file, verify the returned directory and the
        # naming conventions, then clean it up.
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            (fd, name) = tempfile.mkstemp(dir=dir, prefix=pre, suffix=suf)
            (ndir, nbase) = os.path.split(name)
            adir = os.path.abspath(dir)
            self.assertEqual(adir, ndir,
                "Directory '%s' incorrectly returned as '%s'" % (adir, ndir))
        except:
            self.failOnException("mkstemp")
        try:
            self.nameCheck(name, dir, pre, suf)
        finally:
            os.close(fd)
            os.unlink(name)
    def test_basic(self):
        # mkstemp can create files
        self.do_create()
        self.do_create(pre="a")
        self.do_create(suf="b")
        self.do_create(pre="a", suf="b")
        self.do_create(pre="aa", suf=".txt")
        self.do_create(dir=".")
    def test_choose_directory(self):
        # mkstemp can create directories in a user-selected directory
        dir = tempfile.mkdtemp()
        try:
            self.do_create(dir=dir)
        finally:
            os.rmdir(dir)
test_classes.append(test_mkstemp)
class test_mkdtemp(TestBadTempdir, TC):
    """Test mkdtemp()."""
    def make_temp(self):
        # hook used by the inherited TestBadTempdir tests
        return tempfile.mkdtemp()
    def do_create(self, dir=None, pre="", suf=""):
        # Create one mkdtemp directory and return its name; the caller is
        # responsible for removing it.
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            name = tempfile.mkdtemp(dir=dir, prefix=pre, suffix=suf)
        except:
            self.failOnException("mkdtemp")
        try:
            self.nameCheck(name, dir, pre, suf)
            return name
        except:
            os.rmdir(name)
            raise
    def test_basic(self):
        # mkdtemp can create directories
        os.rmdir(self.do_create())
        os.rmdir(self.do_create(pre="a"))
        os.rmdir(self.do_create(suf="b"))
        os.rmdir(self.do_create(pre="a", suf="b"))
        os.rmdir(self.do_create(pre="aa", suf=".txt"))
    def test_basic_many(self):
        # mkdtemp can create many directories (stochastic)
        # Python 2: range() returns a list, so item assignment works
        extant = range(TEST_FILES)
        try:
            for i in extant:
                extant[i] = self.do_create(pre="aa")
        finally:
            for i in extant:
                if(isinstance(i, basestring)):
                    os.rmdir(i)
    def test_choose_directory(self):
        # mkdtemp can create directories in a user-selected directory
        dir = tempfile.mkdtemp()
        try:
            os.rmdir(self.do_create(dir=dir))
        finally:
            os.rmdir(dir)
    @unittest.skipUnless(has_stat, 'os.stat not available')
    def test_mode(self):
        # mkdtemp creates directories with the proper mode
        dir = self.do_create()
        try:
            mode = stat.S_IMODE(os.stat(dir).st_mode)
            mode &= 0777 # Mask off sticky bits inherited from /tmp
            expected = 0700
            if sys.platform in ('win32', 'os2emx'):
                # There's no distinction among 'user', 'group' and 'world';
                # replicate the 'user' bits.
                user = expected >> 6
                expected = user * (1 + 8 + 64)
            self.assertEqual(mode, expected)
        finally:
            os.rmdir(dir)
    def test_collision_with_existing_file(self):
        # mkdtemp tries another name when a file with
        # the chosen name already exists
        with _inside_empty_temp_dir(), \
             _mock_candidate_names('aaa', 'aaa', 'bbb'):
            file = tempfile.NamedTemporaryFile(delete=False)
            file.close()
            self.assertTrue(file.name.endswith('aaa'))
            dir = tempfile.mkdtemp()
            self.assertTrue(dir.endswith('bbb'))
    def test_collision_with_existing_directory(self):
        # mkdtemp tries another name when a directory with
        # the chosen name already exists
        with _inside_empty_temp_dir(), \
             _mock_candidate_names('aaa', 'aaa', 'bbb'):
            dir1 = tempfile.mkdtemp()
            self.assertTrue(dir1.endswith('aaa'))
            dir2 = tempfile.mkdtemp()
            self.assertTrue(dir2.endswith('bbb'))
test_classes.append(test_mkdtemp)
class test_mktemp(TC):
    """Test mktemp()."""
    # For safety, all use of mktemp must occur in a private directory.
    # We must also suppress the RuntimeWarning it generates.
    def setUp(self):
        self.dir = tempfile.mkdtemp()
    def tearDown(self):
        if self.dir:
            os.rmdir(self.dir)
            self.dir = None
    class mktemped:
        # Small helper that reserves a mktemp name by creating the file on
        # construction and unlinks it on garbage collection.
        # os.unlink and the flags are bound as class attributes, presumably
        # so __del__ still works during interpreter shutdown -- confirm.
        _unlink = os.unlink
        _bflags = tempfile._bin_openflags
        def __init__(self, dir, pre, suf):
            self.name = tempfile.mktemp(dir=dir, prefix=pre, suffix=suf)
            # Create the file.  This will raise an exception if it's
            # mysteriously appeared in the meanwhile.
            os.close(os.open(self.name, self._bflags, 0600))
        def __del__(self):
            self._unlink(self.name)
    def do_create(self, pre="", suf=""):
        try:
            file = self.mktemped(self.dir, pre, suf)
        except:
            self.failOnException("mktemp")
        self.nameCheck(file.name, self.dir, pre, suf)
        return file
    def test_basic(self):
        # mktemp can choose usable file names
        self.do_create()
        self.do_create(pre="a")
        self.do_create(suf="b")
        self.do_create(pre="a", suf="b")
        self.do_create(pre="aa", suf=".txt")
    def test_many(self):
        # mktemp can choose many usable file names (stochastic)
        extant = range(TEST_FILES)
        for i in extant:
            extant[i] = self.do_create(pre="aa")
##    def test_warning(self):
##        # mktemp issues a warning when used
##        warnings.filterwarnings("error",
##                                category=RuntimeWarning,
##                                message="mktemp")
##        self.assertRaises(RuntimeWarning,
##                          tempfile.mktemp, dir=self.dir)
test_classes.append(test_mktemp)
# We test _TemporaryFileWrapper by testing NamedTemporaryFile.
class test_NamedTemporaryFile(TC):
    """Test NamedTemporaryFile()."""
    def do_create(self, dir=None, pre="", suf="", delete=True):
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            file = tempfile.NamedTemporaryFile(dir=dir, prefix=pre, suffix=suf,
                                               delete=delete)
        except:
            self.failOnException("NamedTemporaryFile")
        self.nameCheck(file.name, dir, pre, suf)
        return file
    def test_basic(self):
        # NamedTemporaryFile can create files
        self.do_create()
        self.do_create(pre="a")
        self.do_create(suf="b")
        self.do_create(pre="a", suf="b")
        self.do_create(pre="aa", suf=".txt")
    def test_creates_named(self):
        # NamedTemporaryFile creates files with names
        f = tempfile.NamedTemporaryFile()
        self.assertTrue(os.path.exists(f.name),
                        "NamedTemporaryFile %s does not exist" % f.name)
    def test_del_on_close(self):
        # A NamedTemporaryFile is deleted when closed
        dir = tempfile.mkdtemp()
        try:
            f = tempfile.NamedTemporaryFile(dir=dir)
            f.write('blat')
            f.close()
            self.assertFalse(os.path.exists(f.name),
                        "NamedTemporaryFile %s exists after close" % f.name)
        finally:
            os.rmdir(dir)
    def test_dis_del_on_close(self):
        # Tests that delete-on-close can be disabled
        dir = tempfile.mkdtemp()
        tmp = None
        try:
            f = tempfile.NamedTemporaryFile(dir=dir, delete=False)
            tmp = f.name
            f.write('blat')
            f.close()
            self.assertTrue(os.path.exists(f.name),
                        "NamedTemporaryFile %s missing after close" % f.name)
        finally:
            if tmp is not None:
                os.unlink(tmp)
            os.rmdir(dir)
    def test_multiple_close(self):
        # A NamedTemporaryFile can be closed many times without error
        f = tempfile.NamedTemporaryFile()
        f.write('abc\n')
        f.close()
        try:
            f.close()
            f.close()
        except:
            self.failOnException("close")
    def test_context_manager(self):
        # A NamedTemporaryFile can be used as a context manager
        with tempfile.NamedTemporaryFile() as f:
            self.assertTrue(os.path.exists(f.name))
        self.assertFalse(os.path.exists(f.name))
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)
    def test_no_leak_fd(self):
        # Issue #21058: don't leak file descriptor when fdopen() fails
        # Monkeypatch os.close/os.fdopen so the fdopen inside
        # NamedTemporaryFile fails after the fd has been created; the
        # implementation must then close that fd exactly once.
        old_close = os.close
        old_fdopen = os.fdopen
        closed = []
        def close(fd):
            closed.append(fd)
        def fdopen(*args):
            raise ValueError()
        os.close = close
        os.fdopen = fdopen
        try:
            self.assertRaises(ValueError, tempfile.NamedTemporaryFile)
            self.assertEqual(len(closed), 1)
        finally:
            os.close = old_close
            os.fdopen = old_fdopen
    # How to test the mode and bufsize parameters?
test_classes.append(test_NamedTemporaryFile)
class test_SpooledTemporaryFile(TC):
    """Test SpooledTemporaryFile()."""
    # These tests poke at the private _rolled flag, which records whether
    # the in-memory buffer has been flushed ("rolled over") to a real file.
    def do_create(self, max_size=0, dir=None, pre="", suf=""):
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            file = tempfile.SpooledTemporaryFile(max_size=max_size, dir=dir, prefix=pre, suffix=suf)
        except:
            self.failOnException("SpooledTemporaryFile")
        return file
    def test_basic(self):
        # SpooledTemporaryFile can create files
        f = self.do_create()
        self.assertFalse(f._rolled)
        f = self.do_create(max_size=100, pre="a", suf=".txt")
        self.assertFalse(f._rolled)
    def test_del_on_close(self):
        # A SpooledTemporaryFile is deleted when closed
        dir = tempfile.mkdtemp()
        try:
            f = tempfile.SpooledTemporaryFile(max_size=10, dir=dir)
            self.assertFalse(f._rolled)
            f.write('blat ' * 5)
            self.assertTrue(f._rolled)
            filename = f.name
            f.close()
            self.assertFalse(os.path.exists(filename),
                "SpooledTemporaryFile %s exists after close" % filename)
        finally:
            os.rmdir(dir)
    def test_rewrite_small(self):
        # A SpooledTemporaryFile can be written to multiple within the max_size
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        for i in range(5):
            f.seek(0, 0)
            f.write('x' * 20)
        self.assertFalse(f._rolled)
    def test_write_sequential(self):
        # A SpooledTemporaryFile should hold exactly max_size bytes, and roll
        # over afterward
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        f.write('x' * 20)
        self.assertFalse(f._rolled)
        f.write('x' * 10)
        self.assertFalse(f._rolled)
        f.write('x')
        self.assertTrue(f._rolled)
    def test_writelines(self):
        # Verify writelines with a SpooledTemporaryFile
        f = self.do_create()
        f.writelines((b'x', b'y', b'z'))
        f.seek(0)
        buf = f.read()
        self.assertEqual(buf, b'xyz')
    def test_writelines_sequential(self):
        # A SpooledTemporaryFile should hold exactly max_size bytes, and roll
        # over afterward
        f = self.do_create(max_size=35)
        f.writelines((b'x' * 20, b'x' * 10, b'x' * 5))
        self.assertFalse(f._rolled)
        f.write(b'x')
        self.assertTrue(f._rolled)
    def test_xreadlines(self):
        # xreadlines works both before and after rollover (Python 2 API)
        f = self.do_create(max_size=20)
        f.write(b'abc\n' * 5)
        f.seek(0)
        self.assertFalse(f._rolled)
        self.assertEqual(list(f.xreadlines()), [b'abc\n'] * 5)
        f.write(b'x\ny')
        self.assertTrue(f._rolled)
        f.seek(0)
        self.assertEqual(list(f.xreadlines()), [b'abc\n'] * 5 + [b'x\n', b'y'])
    def test_sparse(self):
        # A SpooledTemporaryFile that is written late in the file will extend
        # when that occurs
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        f.seek(100, 0)
        self.assertFalse(f._rolled)
        f.write('x')
        self.assertTrue(f._rolled)
    def test_fileno(self):
        # A SpooledTemporaryFile should roll over to a real file on fileno()
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        self.assertTrue(f.fileno() > 0)
        self.assertTrue(f._rolled)
    def test_multiple_close_before_rollover(self):
        # A SpooledTemporaryFile can be closed many times without error
        f = tempfile.SpooledTemporaryFile()
        f.write('abc\n')
        self.assertFalse(f._rolled)
        f.close()
        try:
            f.close()
            f.close()
        except:
            self.failOnException("close")
    def test_multiple_close_after_rollover(self):
        # A SpooledTemporaryFile can be closed many times without error
        f = tempfile.SpooledTemporaryFile(max_size=1)
        f.write('abc\n')
        self.assertTrue(f._rolled)
        f.close()
        try:
            f.close()
            f.close()
        except:
            self.failOnException("close")
    def test_bound_methods(self):
        # It should be OK to steal a bound method from a SpooledTemporaryFile
        # and use it independently; when the file rolls over, those bound
        # methods should continue to function
        f = self.do_create(max_size=30)
        read = f.read
        write = f.write
        seek = f.seek
        write("a" * 35)
        write("b" * 35)
        seek(0, 0)
        self.assertTrue(read(70) == 'a'*35 + 'b'*35)
    def test_properties(self):
        # mode/name/newlines/encoding behave consistently across rollover
        f = tempfile.SpooledTemporaryFile(max_size=10)
        f.write(b'x' * 10)
        self.assertFalse(f._rolled)
        self.assertEqual(f.mode, 'w+b')
        self.assertIsNone(f.name)
        with self.assertRaises(AttributeError):
            f.newlines
        with self.assertRaises(AttributeError):
            f.encoding
        f.write(b'x')
        self.assertTrue(f._rolled)
        self.assertEqual(f.mode, 'w+b')
        self.assertIsNotNone(f.name)
        with self.assertRaises(AttributeError):
            f.newlines
        with self.assertRaises(AttributeError):
            f.encoding
    def test_context_manager_before_rollover(self):
        # A SpooledTemporaryFile can be used as a context manager
        with tempfile.SpooledTemporaryFile(max_size=1) as f:
            self.assertFalse(f._rolled)
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)
    def test_context_manager_during_rollover(self):
        # A SpooledTemporaryFile can be used as a context manager
        with tempfile.SpooledTemporaryFile(max_size=1) as f:
            self.assertFalse(f._rolled)
            f.write('abc\n')
            f.flush()
            self.assertTrue(f._rolled)
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)
    def test_context_manager_after_rollover(self):
        # A SpooledTemporaryFile can be used as a context manager
        f = tempfile.SpooledTemporaryFile(max_size=1)
        f.write('abc\n')
        f.flush()
        self.assertTrue(f._rolled)
        with f:
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)
test_classes.append(test_SpooledTemporaryFile)
class test_TemporaryFile(TC):
    """Test TemporaryFile()."""
    def test_basic(self):
        # TemporaryFile can create files
        # No point in testing the name params - the file has no name.
        try:
            tempfile.TemporaryFile()
        except:
            self.failOnException("TemporaryFile")
    def test_has_no_name(self):
        # TemporaryFile creates files with no names (on this system)
        dir = tempfile.mkdtemp()
        f = tempfile.TemporaryFile(dir=dir)
        f.write('blat')
        # Sneaky: because this file has no name, it should not prevent
        # us from removing the directory it was created in.
        try:
            os.rmdir(dir)
        except:
            ei = sys.exc_info()
            # cleanup
            f.close()
            os.rmdir(dir)
            self.failOnException("rmdir", ei)
    def test_multiple_close(self):
        # A TemporaryFile can be closed many times without error
        f = tempfile.TemporaryFile()
        f.write('abc\n')
        f.close()
        try:
            f.close()
            f.close()
        except:
            self.failOnException("close")
    # How to test the mode and bufsize parameters?
if tempfile.NamedTemporaryFile is not tempfile.TemporaryFile:
test_classes.append(test_TemporaryFile)
def test_main():
    # Entry point used by the regression-test driver: run every test class
    # accumulated in test_classes above.
    support.run_unittest(*test_classes)
if __name__ == "__main__":
test_main()
|
|
import logging
import os
import yaml
from code_intelligence import github_app
from code_intelligence import graphql
from code_intelligence import github_util
from code_intelligence import util
from label_microservice import automl_model
from label_microservice import combined_model
from label_microservice import universal_kind_label_model as universal_model
UNIVERSAL_MODEL_NAME = "universal"
def _combined_model_name(org, repo=None):
"""Return the name of the combined model for a repo or organization.
If repo is specified looks for a repo specific model. If repo is
none we return an org wide model.
Args:
org: Name of the org.
repo: (Optional) The name of the repo
"""
if repo:
return f"{org}/{repo}_combined"
return f"{org}_combined"
def _dict_has_keys(d, keys):
for k in keys:
if not k in d:
return False
return True
class IssueLabelPredictor:
  """Predict labels for an issue.

  This class combines various model classes with logic to fetch information
  about the issue.

  This class doesn't attach the labels to the issues.
  """

  def __init__(self):
    # A dictionary mapping model names to individual models.
    self._models = {}
    self._load_models()

    self._gh_client = graphql.GraphQLClient()

    if not self._gh_client._headers:
      logging.error("client._headers not set on GraphQLClient. This likely "
                    "means no GitHub credentials are loaded and requests to "
                    "GitHub API will likely fail")

  def _load_models(self):
    """Load the universal model plus any org models listed in $MODEL_CONFIG."""
    logging.info("Loading the universal model")
    self._models[UNIVERSAL_MODEL_NAME] = universal_model.UniversalKindLabelModel()

    model_config = None
    model_config_path = os.getenv("MODEL_CONFIG")

    if model_config_path:
      logging.info(f"Loading model config from {model_config_path}")
      with open(model_config_path) as fh:
        # safe_load: yaml.load without an explicit Loader is deprecated and
        # can construct arbitrary Python objects from the config file.
        model_config = yaml.safe_load(fh)
    else:
      logging.info("Environment variable MODEL_CONFIG not set; no config "
                   "loaded.")

    if not model_config:
      # Bug fix: the original fell through to model_config.get(...) even when
      # MODEL_CONFIG was unset, raising NameError.
      return

    for org in model_config.get("orgs", []):
      org_name = org.get("name")
      logging.info(f"Processing model config for org: {org_name}")

      if org.get("automl_model"):
        model = org.get("automl_model")
        logging.info(f"Loading AutoML model for org: {org_name}; "
                     f"model: {model}")
        org_model = automl_model.AutoMLModel(model_name=model)
        self._models[org_name] = org_model

        combined = combined_model.CombinedLabelModels(
          models=[self._models[UNIVERSAL_MODEL_NAME], org_model])
        self._models[_combined_model_name(org_name)] = combined

  def predict_labels_for_data(self, model_name, org, repo, title, text,
                              context=None):
    """Generate label predictions for the specified data.

    Args:
      model_name: Which model to use
      org: org
      repo: Repo name
      title: Title for the issue
      text: A list of strings representing the body and any comments on the
        issue.
      context: (Optional) dict of extra fields to attach to log records.

    Returns:
      dict: str -> float; dictionary mapping labels to their probability

    Raises:
      ValueError: If model_name doesn't name a loaded model.
    """
    if not model_name in self._models:
      raise ValueError(f"No model named {model_name}")

    model = self._models[model_name]
    # Fix: trailing space added so "using" and "model" don't run together
    # in the emitted log line.
    logging.info(f"Generating predictions for title={title} text={text} using "
                 f"model: {model_name} class:{model.__class__}", extra=context)
    predictions = model.predict_issue_labels(org, repo, title, text,
                                             context=context)

    return predictions

  def graphql_client(self, org, repo):
    """Return a GitHub GraphQL client for the specified org and repository.

    Args:
      org: The org.
      repo: The repo
    """
    # TODO(jlewi): Should we cache these?
    ghapp = github_app.GitHubApp.create_from_env()
    token_generator = github_app.GitHubAppTokenGenerator(
      ghapp, f"{org}/{repo}")
    gh_client = graphql.GraphQLClient(headers=token_generator.auth_headers)
    return gh_client

  def predict_labels_for_issue(self, org, repo, issue_number, model_name=None):
    """Generate label predictions for a github issue.

    The function contacts GitHub to collect the required data.

    Args:
      org: The GitHub organization
      repo: The repo that owns the issue
      issue_number: The github issue number
      model_name: (Optional) the name of the model to use to generate
        predictions. if not supplied it is inferred based on the repository.

    Returns:
      dict: str -> float; dictionary mapping labels to their probability
    """
    if not model_name:
      # Prefer the most specific loaded model: repo-level combined, then
      # org-level combined, then the universal fallback.
      org_model = _combined_model_name(org)
      repo_model = _combined_model_name(org, repo)

      if repo_model in self._models:
        model_name = repo_model
      elif org_model in self._models:
        model_name = org_model
      else:
        model_name = UNIVERSAL_MODEL_NAME

    logging.info(f"Predict labels for "
                 f"{org}/{repo}#{issue_number} using "
                 f"model {model_name}")

    url = util.build_issue_url(org, repo, issue_number)
    data = github_util.get_issue(url, self.graphql_client(org, repo))

    if not data.get("title"):
      logging.warning(f"Got empty title for {org}/{repo}#{issue_number}")

    # Bug fix: was data.get("coment"); the key consumed below is "comments".
    if not data.get("comments"):
      logging.warning(f"Got empty body and comments for {org}/{repo}#{issue_number}")

    context = {
      "repo_owner": org,
      "repo_name": repo,
      "issue_num": issue_number,
    }
    predictions = self.predict_labels_for_data(
      model_name, org, repo, data.get("title"), data.get("comments"),
      context=context)

    return predictions

  def predict(self, data):
    """Generate predictions for the specified payload.

    Args: data a dictionary containing the data to generate predictions for.
    The payload can either look like
    {
      "repo_owner": <GitHub owner of the issue>
      "repo_name": <GitHub repo>
      "title": "some issue title"
      "text": ["This is the body of the issue", "First comment"]
      "model_name": Name of model to use
      ...
    }
    in this case predictions will be generated for this title and text.

    or

    {
      "repo_owner": <GitHub owner of the issue>
      "repo_name": <GitHub repo>
      "issue_num": <Issue number>
      "model_name": (optional) name of the model to use
      ...
    }

    Raises:
      ValueError: If the payload matches neither schema.
    """
    text_keys = ["title", "text", "model_name"]
    issue_keys = ["repo_owner", "repo_name", "issue_num"]

    if _dict_has_keys(data, text_keys):  # pylint: disable=no-else-return
      # Bug fix: the original had "data['repo_name']. data['title']" -- a
      # "." where a "," belongs -- which raised AttributeError at runtime.
      return self.predict_labels_for_data(data["model_name"],
                                          data["repo_owner"],
                                          data["repo_name"], data["title"],
                                          data["text"])
    elif _dict_has_keys(data, issue_keys):  # pylint: disable=no-else-return
      return self.predict_labels_for_issue(data["repo_owner"],
                                           data["repo_name"],
                                           data["issue_num"],
                                           model_name=data.get("model_name"))
    else:
      actual = ",".join(data.keys())
      text_str = ",".join(text_keys)
      issue_str = ",".join(issue_keys)
      want = f"[{text_str}] or [{issue_str}]"
      logging.error(f"Data is missing required keys; got {actual}; want {want}")
      raise ValueError(f"Data is missing required keys; got {actual}; want {want}")
|
|
# $Id$
#
# Copyright (C) 2004-2008 Greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from __future__ import print_function
from rdkit import RDConfig
import sys,time,math
from rdkit.ML.Data import Stats
import rdkit.DistanceGeometry as DG
from rdkit import Chem
import numpy
from rdkit.Chem import rdDistGeom as MolDG
from rdkit.Chem import ChemicalFeatures
from rdkit.Chem import ChemicalForceFields
import Pharmacophore,ExcludedVolume
from rdkit import Geometry
_times = {}
from rdkit import RDLogger as logging
logger = logging.logger()
defaultFeatLength=2.0
def GetAtomHeavyNeighbors(atom):
  """ returns a list of the heavy-atom neighbors of the
  atom passed in:

  >>> m = Chem.MolFromSmiles('CCO')
  >>> l = GetAtomHeavyNeighbors(m.GetAtomWithIdx(0))
  >>> len(l)
  1
  >>> isinstance(l[0],Chem.Atom)
  True
  >>> l[0].GetIdx()
  1

  >>> l = GetAtomHeavyNeighbors(m.GetAtomWithIdx(1))
  >>> len(l)
  2
  >>> l[0].GetIdx()
  0
  >>> l[1].GetIdx()
  2

  """
  # keep every neighbor whose atomic number is not 1 (i.e. not a hydrogen)
  return [nbr for nbr in atom.GetNeighbors() if nbr.GetAtomicNum() != 1]
def ReplaceGroup(match,bounds,slop=0.01,useDirs=False,dirLength=defaultFeatLength):
  """ Adds an entry at the end of the bounds matrix for a point at
   the center of a multi-point feature

   returns a 2-tuple:
     new bounds mat
     index of point added

  >>> boundsMat = numpy.array([[0.0,2.0,2.0],[1.0,0.0,2.0],[1.0,1.0,0.0]])
  >>> match = [0,1,2]
  >>> bm,idx = ReplaceGroup(match,boundsMat,slop=0.0)

  the index is at the end:
  >>> idx == 3
  True

  and the matrix is one bigger:
  >>> bm.shape == (4, 4)
  True

  but the original bounds mat is not altered:
  >>> boundsMat.shape == (3, 3)
  True

  We make the assumption that the points of the
  feature form a regular polygon, are listed in order
  (i.e. pt 0 is a neighbor to pt 1 and pt N-1)
  and that the replacement point goes at the center:
  >>> print(', '.join(['%.3f'%x for x in bm[-1]]))
  0.577, 0.577, 0.577, 0.000
  >>> print(', '.join(['%.3f'%x for x in bm[:,-1]]))
  1.155, 1.155, 1.155, 0.000

  The slop argument (default = 0.01) is fractional:
  >>> bm,idx = ReplaceGroup(match,boundsMat)
  >>> print(', '.join(['%.3f'%x for x in bm[-1]]))
  0.572, 0.572, 0.572, 0.000
  >>> print(', '.join(['%.3f'%x for x in bm[:,-1]]))
  1.166, 1.166, 1.166, 0.000

  """
  maxVal = -1000.0
  minVal = 1e8
  nPts = len(match)
  # collect the shortest lower bound and longest upper bound over the
  # polygon's edges (each point paired with its neighbor, wrapping around)
  for i in range(nPts):
    idx0 = match[i]
    if i<nPts-1:
      idx1 = match[i+1]
    else:
      idx1 = match[0]
    if idx1<idx0:
      idx0,idx1 = idx1,idx0
    minVal = min(minVal,bounds[idx1,idx0])
    maxVal = max(maxVal,bounds[idx0,idx1])
  maxVal *= (1+slop)
  minVal *= (1-slop)

  # circumradius factor for a regular nPts-gon: R = edge/(2*sin(pi/n))
  scaleFact = 1.0/(2.0*math.sin(math.pi/nPts))
  minVal *= scaleFact
  maxVal *= scaleFact
  replaceIdx = bounds.shape[0]
  # Fix: numpy.float was a deprecated alias for the builtin float and was
  # removed in NumPy 1.20; use float directly (identical dtype).
  if not useDirs:
    bm = numpy.zeros((bounds.shape[0]+1,bounds.shape[1]+1),float)
  else:
    bm = numpy.zeros((bounds.shape[0]+2,bounds.shape[1]+2),float)
  bm[0:bounds.shape[0],0:bounds.shape[1]]=bounds
  bm[:replaceIdx,replaceIdx]=1000.
  if useDirs:
    bm[:replaceIdx+1,replaceIdx+1]=1000.
    # set the feature - direction point bounds:
    bm[replaceIdx,replaceIdx+1]=dirLength+slop
    bm[replaceIdx+1,replaceIdx]=dirLength-slop

  for idx1 in match:
    bm[idx1,replaceIdx]=maxVal
    bm[replaceIdx,idx1]=minVal
    if useDirs:
      # set the point - direction point bounds:
      bm[idx1,replaceIdx+1] = numpy.sqrt(bm[replaceIdx,replaceIdx+1]**2+maxVal**2)
      bm[replaceIdx+1,idx1] = numpy.sqrt(bm[replaceIdx+1,replaceIdx]**2+minVal**2)
  return bm,replaceIdx
def EmbedMol(mol,bm,atomMatch=None,weight=2.0,randomSeed=-1,
             excludedVolumes=None):
  """ Generates an embedding for a molecule based on a bounds matrix and adds
  a conformer (id 0) to the molecule

  if the optional argument atomMatch is provided, it will be used to provide
  supplemental weights for the embedding routine (used in the optimization
  phase to ensure that the resulting geometry really does satisfy the
  pharmacophore).

  if the excludedVolumes is provided, it should be a sequence of
  ExcludedVolume objects

  >>> m = Chem.MolFromSmiles('c1ccccc1C')
  >>> bounds = MolDG.GetMoleculeBoundsMatrix(m)
  >>> bounds.shape == (7, 7)
  True
  >>> m.GetNumConformers()
  0
  >>> EmbedMol(m,bounds,randomSeed=23)
  >>> m.GetNumConformers()
  1

  """
  numAtoms = mol.GetNumAtoms()
  # supplemental spring weights handed to the embedding routine
  springs = []
  if atomMatch:
    nMatch = len(atomMatch)
    # one spring per pair of pharmacophore points
    springs.extend((i, j, weight)
                   for i in range(nMatch)
                   for j in range(i + 1, nMatch))
  if excludedVolumes:
    # excluded volumes affect every other atom:
    springs.extend((atomIdx, vol.index, weight)
                   for vol in excludedVolumes
                   for atomIdx in range(numAtoms))
  coords = DG.EmbedBoundsMatrix(bm, weights=springs, numZeroFail=1,
                                randomSeed=randomSeed)
  #for row in coords:
  #  print(', '.join(['%.2f'%x for x in row]))

  conf = Chem.Conformer(numAtoms)
  conf.SetId(0)
  for atomIdx in range(numAtoms):
    conf.SetAtomPosition(atomIdx, list(coords[atomIdx]))
  if excludedVolumes:
    # record where each excluded volume ended up
    for vol in excludedVolumes:
      vol.pos = numpy.array(coords[vol.index])
    #print(' % 7.4f % 7.4f % 7.4f Ar  0  0  0  0  0  0  0  0  0  0  0  0'%tuple(coords[-1]), file=sys.stderr)
  mol.AddConformer(conf)
def AddExcludedVolumes(bm,excludedVolumes,smoothIt=True):
  """ Adds a set of excluded volumes to the bounds matrix
  and returns the new matrix

  excludedVolumes is a list of ExcludedVolume objects

  >>> boundsMat = numpy.array([[0.0,2.0,2.0],[1.0,0.0,2.0],[1.0,1.0,0.0]])
  >>> ev1 = ExcludedVolume.ExcludedVolume(([(0,),0.5,1.0],),exclusionDist=1.5)
  >>> bm = AddExcludedVolumes(boundsMat,(ev1,))

  the results matrix is one bigger:
  >>> bm.shape == (4, 4)
  True

  and the original bounds mat is not altered:
  >>> boundsMat.shape == (3, 3)
  True

  >>> print(', '.join(['%.3f'%x for x in bm[-1]]))
  0.500, 1.500, 1.500, 0.000
  >>> print(', '.join(['%.3f'%x for x in bm[:,-1]]))
  1.000, 3.000, 3.000, 0.000

  """
  oDim = bm.shape[0]
  dim = oDim+len(excludedVolumes)
  # Fix: numpy.float was a deprecated alias for the builtin float and was
  # removed in NumPy 1.20; use float directly (identical dtype).
  res = numpy.zeros((dim,dim),float)
  res[:oDim,:oDim] = bm
  for i,vol in enumerate(excludedVolumes):
    bmIdx = oDim+i
    # record where this volume landed so later code can refer back to it
    vol.index = bmIdx

    # set values to all the atoms:
    res[bmIdx,:bmIdx] = vol.exclusionDist
    res[:bmIdx,bmIdx] = 1000.0

    # set values to our defining features:
    for indices,minV,maxV in vol.featInfo:
      for index in indices:
        try:
          res[bmIdx,index] = minV
          res[index,bmIdx] = maxV
        except IndexError:
          logger.error('BAD INDEX: res[%d,%d], shape is %s'%(bmIdx,index,str(res.shape)))
          # Fix: bare raise preserves the original traceback (the old code
          # raised a fresh IndexError, losing it); callers still see
          # IndexError.
          raise

    # set values to other excluded volumes:
    for j in range(bmIdx+1,dim):
      res[bmIdx,j:dim] = 0
      res[j:dim,bmIdx] = 1000

  if smoothIt: DG.DoTriangleSmoothing(res)
  return res
def UpdatePharmacophoreBounds(bm,atomMatch,pcophore,useDirs=False,
                              dirLength=defaultFeatLength,
                              mol=None):
  """ loops over a distance bounds matrix and replaces the elements
  that are altered by a pharmacophore

  **NOTE** this returns the resulting bounds matrix, but it may also
  alter the input matrix

  atomMatch is a sequence of sequences containing atom indices
  for each of the pharmacophore's features.

  >>> feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1', Geometry.Point3D(0.0, 0.0, 0.0)),
  ...          ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),
  ...          ]
  >>> pcophore=Pharmacophore.Pharmacophore(feats)
  >>> pcophore.setLowerBound(0,1, 1.0)
  >>> pcophore.setUpperBound(0,1, 2.0)
  >>> boundsMat = numpy.array([[0.0,3.0,3.0],[2.0,0.0,3.0],[2.0,2.0,0.0]])
  >>> atomMatch = ((0,),(1,))
  >>> bm = UpdatePharmacophoreBounds(boundsMat,atomMatch,pcophore)

  In this case, there are no multi-atom features, so the result matrix
  is the same as the input:
  >>> bm is boundsMat
  True

  this means, of course, that the input boundsMat is altered:
  >>> print(', '.join(['%.3f'%x for x in boundsMat[0]]))
  0.000, 2.000, 3.000
  >>> print(', '.join(['%.3f'%x for x in boundsMat[1]]))
  1.000, 0.000, 3.000
  >>> print(', '.join(['%.3f'%x for x in boundsMat[2]]))
  2.000, 2.000, 0.000

  """
  # first pass: collapse each multi-atom feature to a single new point;
  # featRows maps feature index -> row of that replacement point
  featRows = {}
  for featIdx, featAtoms in enumerate(atomMatch):
    if len(featAtoms) > 1:
      bm, newRow = ReplaceGroup(featAtoms, bm, useDirs=useDirs)
      featRows[featIdx] = newRow

  # second pass: write the pharmacophore's distance bounds between every
  # pair of feature points (upper bound above the diagonal, lower below)
  nFeats = len(atomMatch)
  for i in range(nFeats):
    rowI = featRows.get(i, atomMatch[i][0])
    for j in range(i + 1, nFeats):
      rowJ = featRows.get(j, atomMatch[j][0])
      lo, hi = (rowI, rowJ) if rowI < rowJ else (rowJ, rowI)
      bm[lo, hi] = pcophore.getUpperBound(i, j)
      bm[hi, lo] = pcophore.getLowerBound(i, j)

  return bm
def EmbedPharmacophore(mol,atomMatch,pcophore,randomSeed=-1,count=10,smoothFirst=True,
                       silent=False,bounds=None,excludedVolumes=None,targetNumber=-1,
                       useDirs=False):
  """ Generates one or more embeddings for a molecule that satisfy a pharmacophore

  atomMatch is a sequence of sequences containing atom indices
  for each of the pharmacophore's features.

    - count: is the maximum number of attempts to make a generating an embedding
    - smoothFirst: toggles triangle smoothing of the molecular bounds matix
    - bounds: if provided, should be the molecular bounds matrix. If this isn't
       provided, the matrix will be generated.
    - targetNumber: if this number is positive, it provides a maximum number
       of embeddings to generate (i.e. we'll have count attempts to generate
       targetNumber embeddings).

  returns: a 3 tuple:
    1) the molecular bounds matrix adjusted for the pharmacophore
    2) a list of embeddings (molecules with a single conformer)
    3) the number of failed attempts at embedding

  >>> m = Chem.MolFromSmiles('OCCN')
  >>> feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1', Geometry.Point3D(0.0, 0.0, 0.0)),
  ...          ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),
  ...          ]
  >>> pcophore=Pharmacophore.Pharmacophore(feats)
  >>> pcophore.setLowerBound(0,1, 2.5)
  >>> pcophore.setUpperBound(0,1, 3.5)
  >>> atomMatch = ((0,),(3,))
  >>> bm,embeds,nFail = EmbedPharmacophore(m,atomMatch,pcophore,randomSeed=23,silent=1)
  >>> len(embeds)
  10
  >>> nFail
  0

  Set up a case that can't succeed:
  >>> pcophore=Pharmacophore.Pharmacophore(feats)
  >>> pcophore.setLowerBound(0,1, 2.0)
  >>> pcophore.setUpperBound(0,1, 2.1)
  >>> atomMatch = ((0,),(3,))
  >>> bm,embeds,nFail = EmbedPharmacophore(m,atomMatch,pcophore,randomSeed=23,silent=1)
  >>> len(embeds)
  0
  >>> nFail
  10

  """
  global _times
  # chiral centers are cached on the molecule so repeated calls don't
  # recompute them
  if not hasattr(mol,'_chiralCenters'):
    mol._chiralCenters = Chem.FindMolChiralCenters(mol)
  if bounds is None:
    bounds = MolDG.GetMoleculeBoundsMatrix(mol)
    if smoothFirst: DG.DoTriangleSmoothing(bounds)
  # work on a copy so the caller's bounds matrix is not modified here
  bm = bounds.copy()
  #print '------------'
  #print 'initial'
  #for row in bm:
  #  print ' ',' '.join(['% 4.2f'%x for x in row])
  #print '------------'

  bm = UpdatePharmacophoreBounds(bm,atomMatch,pcophore,useDirs=useDirs,mol=mol)

  if excludedVolumes:
    bm = AddExcludedVolumes(bm,excludedVolumes,smoothIt=False)

  if not DG.DoTriangleSmoothing(bm):
    raise ValueError("could not smooth bounds matrix")

  #print '------------'
  #print 'post replace and smooth'
  #for row in bm:
  #  print ' ',' '.join(['% 4.2f'%x for x in row])
  #print '------------'

  if targetNumber<=0:
    targetNumber=count
  nFailed = 0
  res = []
  for i in range(count):
    # NOTE(review): bm[:,:] on a numpy array is a *view*, not a copy --
    # presumably EmbedMol does not modify the matrix it is given; confirm.
    tmpM = bm[:,:]
    m2 = Chem.Mol(mol)
    t1 = time.time()
    try:
      # derive a distinct, deterministic seed for each attempt
      if randomSeed<=0:
        seed = i*10+1
      else:
        seed = i*10+randomSeed
      EmbedMol(m2,tmpM,atomMatch,randomSeed=seed,
               excludedVolumes=excludedVolumes)
    except ValueError:
      if not silent:
        logger.info('Embed failed')
      nFailed += 1
    else:
      # accumulate wall-clock time spent embedding in the module-level
      # _times profiling dict
      t2 = time.time()
      _times['embed'] = _times.get('embed',0)+t2-t1
      keepIt=True
      # discard embeddings whose chiral volume disagrees with the assigned
      # R/S label (ComputeChiralVolume is defined elsewhere in this module)
      for idx,stereo in mol._chiralCenters:
        if stereo in ('R','S'):
          vol = ComputeChiralVolume(m2,idx)
          if (stereo=='R' and vol>=0) or \
             (stereo=='S' and vol<=0):
            keepIt=False
            break
      if keepIt:
        res.append(m2)
      else:
        logger.debug('Removed embedding due to chiral constraints.')
      if len(res)==targetNumber: break
  return bm,res,nFailed
def isNaN(v):
    """ provides an OS independent way of detecting NaNs

    This is intended to be used with values returned from the C++
    side of things.

    Relies on the IEEE-754 property that NaN is the only value that
    does not compare equal to itself.

    NOTE: the previous implementation only ever detected NaNs on
    win32; on every other platform the check `v==0 and v==1` is always
    False, so NaN energies slipped through undetected.

    >>> isNaN(0)
    False
    >>> isNaN(float('nan'))
    True
    """
    # NaN != NaN under IEEE-754; every other value equals itself.
    return v != v
def OptimizeMol(mol, bm, atomMatches=None, excludedVolumes=None,
                forceConstant=1200.0,
                maxPasses=5, verbose=False):
    """ carries out a UFF optimization for a molecule optionally subject
    to the constraints in a bounds matrix

    - atomMatches, if provided, is a sequence of sequences
    - forceConstant is the force constant of the spring used to enforce
      the constraints

    returns a 2-tuple:
      1) the energy of the initial conformation
      2) the energy post-embedding
    NOTE that these energies include the energies of the constraints

    >>> m = Chem.MolFromSmiles('OCCN')
    >>> feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1', Geometry.Point3D(0.0, 0.0, 0.0)),
    ...   ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),
    ...   ]
    >>> pcophore=Pharmacophore.Pharmacophore(feats)
    >>> pcophore.setLowerBound(0,1, 2.5)
    >>> pcophore.setUpperBound(0,1, 2.8)
    >>> atomMatch = ((0,),(3,))
    >>> bm,embeds,nFail = EmbedPharmacophore(m,atomMatch,pcophore,randomSeed=23,silent=1)
    >>> len(embeds)
    10
    >>> testM = embeds[0]

    Do the optimization:
    >>> e1,e2 = OptimizeMol(testM,bm,atomMatches=atomMatch)

    Optimizing should have lowered the energy:
    >>> e2 < e1
    True

    Check the constrained distance:
    >>> conf = testM.GetConformer(0)
    >>> p0 = conf.GetAtomPosition(0)
    >>> p3 = conf.GetAtomPosition(3)
    >>> d03 = p0.Distance(p3)
    >>> d03 >= pcophore.getLowerBound(0,1)-.01
    True
    >>> d03 <= pcophore.getUpperBound(0,1)+.01
    True

    If we optimize without the distance constraints (provided via the atomMatches
    argument) we're not guaranteed to get the same results, particularly in a case
    like the current one where the pharmcophore brings the atoms uncomfortably
    close together:
    >>> testM = embeds[1]
    >>> e1,e2 = OptimizeMol(testM,bm)
    >>> e2 < e1
    True
    >>> conf = testM.GetConformer(0)
    >>> p0 = conf.GetAtomPosition(0)
    >>> p3 = conf.GetAtomPosition(3)
    >>> d03 = p0.Distance(p3)
    >>> d03 >= pcophore.getLowerBound(0,1)-.01
    True
    >>> d03 <= pcophore.getUpperBound(0,1)+.01
    False
    """
    try:
        ff = ChemicalForceFields.UFFGetMoleculeForceField(mol)
    except Exception:
        logger.info('Problems building molecular forcefield', exc_info=True)
        return -1.0, -1.0

    # collect every cross-feature atom pair from the match; each gets a
    # spring constraining it to its bounds-matrix window
    weights = []
    if atomMatches:
        for k in range(len(atomMatches)):
            for i in atomMatches[k]:
                for l in range(k + 1, len(atomMatches)):
                    for j in atomMatches[l]:
                        weights.append((i, j))
    for i, j in weights:
        if j < i:
            i, j = j, i
        # lower bounds live below the diagonal, upper bounds above it
        minV = bm[j, i]
        maxV = bm[i, j]
        ff.AddDistanceConstraint(i, j, minV, maxV, forceConstant)
    if excludedVolumes:
        nAts = mol.GetNumAtoms()
        conf = mol.GetConformer()
        idx = nAts
        for exVol in excludedVolumes:
            assert exVol.pos is not None
            logger.debug('ff.AddExtraPoint(%.4f,%.4f,%.4f)' % (exVol.pos[0], exVol.pos[1],
                                                               exVol.pos[2]))
            ff.AddExtraPoint(exVol.pos[0], exVol.pos[1], exVol.pos[2], True)
            indices = []
            for localIndices, foo, bar in exVol.featInfo:
                indices += list(localIndices)
            for i in range(nAts):
                v = numpy.array(conf.GetAtomPosition(i)) - numpy.array(exVol.pos)
                d = numpy.sqrt(numpy.dot(v, v))
                if i not in indices:
                    # keep non-feature atoms out of the excluded volume; only
                    # constrain atoms already reasonably close (d<5.0)
                    if d < 5.0:
                        logger.debug('ff.AddDistanceConstraint(%d,%d,%.3f,%d,%.0f)' % (
                            i, idx, exVol.exclusionDist, 1000, forceConstant))
                        ff.AddDistanceConstraint(i, idx, exVol.exclusionDist, 1000,
                                                 forceConstant)
                else:
                    logger.debug('ff.AddDistanceConstraint(%d,%d,%.3f,%.3f,%.0f)' % (
                        i, idx, bm[exVol.index, i], bm[i, exVol.index], forceConstant))
                    ff.AddDistanceConstraint(i, idx, bm[exVol.index, i], bm[i, exVol.index],
                                             forceConstant)
            idx += 1
    ff.Initialize()
    e1 = ff.CalcEnergy()
    if isNaN(e1):
        raise ValueError('bogus energy')

    if verbose:
        print(Chem.MolToMolBlock(mol))
        # BUGFIX: excludedVolumes may be None; the original iterated it
        # unconditionally whenever verbose was set and crashed with a TypeError.
        for i, vol in enumerate(excludedVolumes or []):
            pos = ff.GetExtraPointPos(i)
            print(' % 7.4f % 7.4f % 7.4f As 0 0 0 0 0 0 0 0 0 0 0 0' % tuple(pos),
                  file=sys.stderr)

    needsMore = ff.Minimize()
    nPasses = 0
    while needsMore and nPasses < maxPasses:
        needsMore = ff.Minimize()
        nPasses += 1
    e2 = ff.CalcEnergy()
    if isNaN(e2):
        raise ValueError('bogus energy')

    if verbose:
        print('--------')
        print(Chem.MolToMolBlock(mol))
        for i, vol in enumerate(excludedVolumes or []):
            pos = ff.GetExtraPointPos(i)
            print(' % 7.4f % 7.4f % 7.4f Sb 0 0 0 0 0 0 0 0 0 0 0 0' % tuple(pos),
                  file=sys.stderr)
    # drop the forcefield reference so its resources can be reclaimed
    ff = None
    return e1, e2
def _meanAndDevOrDefault(values):
    """Return Stats.MeanAndDev(values); on any failure (e.g. too few
    values) fall back to (-1.0, -1.0)."""
    try:
        return Stats.MeanAndDev(values)
    except Exception:
        return -1.0, -1.0


def EmbedOne(mol, name, match, pcophore, count=1, silent=0, **kwargs):
    """ generates statistics for a molecule's embeddings

    Four energies are computed for each embedding:
      1) E1: the energy (with constraints) of the initial embedding
      2) E2: the energy (with constraints) of the optimized embedding
      3) E3: the energy (no constraints) the geometry for E2
      4) E4: the energy (no constraints) of the optimized free-molecule
         (starting from the E3 geometry)

    Returns a 9-tuple:
      1) the mean value of E1
      2) the sample standard deviation of E1
      3) the mean value of E2
      4) the sample standard deviation of E2
      5) the mean value of E3
      6) the sample standard deviation of E3
      7) the mean value of E4
      8) the sample standard deviation of E4
      9) The number of embeddings that failed
    """
    global _times
    atomMatch = [list(x.GetAtomIds()) for x in match]
    bm, ms, nFailed = EmbedPharmacophore(mol, atomMatch, pcophore, count=count,
                                         silent=silent, **kwargs)
    e1s = []
    e2s = []
    e3s = []
    e4s = []
    # NOTE: the original version also accumulated d12s/d23s/d34s energy
    # differences that were never used; d23s could even reference an unbound
    # e2 when the first (constrained) optimization failed for the very first
    # embedding. Those dead accumulators are gone, as is an unused
    # `count += 1` at the end of the loop.
    for m in ms:
        t1 = time.time()
        try:
            e1, e2 = OptimizeMol(m, bm, atomMatch)
        except ValueError:
            pass
        else:
            t2 = time.time()
            _times['opt1'] = _times.get('opt1', 0) + t2 - t1
            e1s.append(e1)
            e2s.append(e2)
        t1 = time.time()
        try:
            e3, e4 = OptimizeMol(m, bm)
        except ValueError:
            pass
        else:
            t2 = time.time()
            _times['opt2'] = _times.get('opt2', 0) + t2 - t1
            e3s.append(e3)
            e4s.append(e4)
    e1, e1d = _meanAndDevOrDefault(e1s)
    e2, e2d = _meanAndDevOrDefault(e2s)
    e3, e3d = _meanAndDevOrDefault(e3s)
    e4, e4d = _meanAndDevOrDefault(e4s)
    if not silent:
        print('%s(%d): %.2f(%.2f) -> %.2f(%.2f) : %.2f(%.2f) -> %.2f(%.2f)' %
              (name, nFailed, e1, e1d, e2, e2d, e3, e3d, e4, e4d))
    return e1, e1d, e2, e2d, e3, e3d, e4, e4d, nFailed
def MatchPharmacophoreToMol(mol, featFactory, pcophore):
    """ generates a list of all possible mappings of a pharmacophore to a molecule

    This is a convenience wrapper: it simply matches the pharmacophore's own
    feature list via MatchFeatsToMol.

    Returns a 2-tuple:
      1) a boolean indicating whether or not all features were found
      2) a list, numFeatures long, of sequences of features

    >>> import os.path
    >>> dataDir = os.path.join(RDConfig.RDCodeDir,'Chem/Pharm3D/test_data')
    >>> featFactory = ChemicalFeatures.BuildFeatureFactory(os.path.join(dataDir,'BaseFeatures.fdef'))
    >>> activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor', Geometry.Point3D(0.0, 0.0, 0.0)),
    ...  ChemicalFeatures.FreeChemicalFeature('Donor',Geometry.Point3D(0.0, 0.0, 0.0))]
    >>> pcophore= Pharmacophore.Pharmacophore(activeFeats)
    >>> m = Chem.MolFromSmiles('FCCN')
    >>> match,mList = MatchPharmacophoreToMol(m,featFactory,pcophore)
    >>> match
    True

    Two feature types:
    >>> len(mList)
    2

    The first feature type, Acceptor, has two matches:
    >>> len(mList[0])
    2
    >>> mList[0][0].GetAtomIds()
    (0,)
    >>> mList[0][1].GetAtomIds()
    (3,)

    The first feature type, Donor, has a single match:
    >>> len(mList[1])
    1
    >>> mList[1][0].GetAtomIds()
    (3,)
    """
    pcophoreFeats = pcophore.getFeatures()
    return MatchFeatsToMol(mol, featFactory, pcophoreFeats)
def _getFeatDict(mol, featFactory, features):
    """ **INTERNAL USE ONLY**

    Maps each feature family appearing in *features* to the matches the
    factory finds for it in *mol*; each family is queried exactly once.

    >>> import os.path
    >>> dataDir = os.path.join(RDConfig.RDCodeDir,'Chem/Pharm3D/test_data')
    >>> featFactory = ChemicalFeatures.BuildFeatureFactory(os.path.join(dataDir,'BaseFeatures.fdef'))
    >>> activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor', Geometry.Point3D(0.0, 0.0, 0.0)),
    ...  ChemicalFeatures.FreeChemicalFeature('Donor',Geometry.Point3D(0.0, 0.0, 0.0))]
    >>> m = Chem.MolFromSmiles('FCCN')
    >>> d =_getFeatDict(m,featFactory,activeFeats)
    >>> sorted(list(d.keys()))
    ['Acceptor', 'Donor']
    >>> donors = d['Donor']
    >>> len(donors)
    1
    >>> donors[0].GetAtomIds()
    (3,)
    >>> acceptors = d['Acceptor']
    >>> len(acceptors)
    2
    >>> acceptors[0].GetAtomIds()
    (0,)
    >>> acceptors[1].GetAtomIds()
    (3,)
    """
    featsByFamily = {}
    for feat in features:
        family = feat.GetFamily()
        if family not in featsByFamily:
            featsByFamily[family] = featFactory.GetFeaturesForMol(mol,
                                                                  includeOnly=family)
    return featsByFamily
def MatchFeatsToMol(mol, featFactory, features):
    """ generates a list of all possible mappings of each feature to a molecule

    Returns a 2-tuple:
      1) a boolean indicating whether or not all features were found
      2) a list, numFeatures long, of sequences of features
         (None when any feature had no match)

    >>> import os.path
    >>> dataDir = os.path.join(RDConfig.RDCodeDir,'Chem/Pharm3D/test_data')
    >>> featFactory = ChemicalFeatures.BuildFeatureFactory(os.path.join(dataDir,'BaseFeatures.fdef'))
    >>> activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor', Geometry.Point3D(0.0, 0.0, 0.0)),
    ...  ChemicalFeatures.FreeChemicalFeature('Donor',Geometry.Point3D(0.0, 0.0, 0.0))]
    >>> m = Chem.MolFromSmiles('FCCN')
    >>> match,mList = MatchFeatsToMol(m,featFactory,activeFeats)
    >>> match
    True

    Two feature types:
    >>> len(mList)
    2

    The first feature type, Acceptor, has two matches:
    >>> len(mList[0])
    2
    >>> mList[0][0].GetAtomIds()
    (0,)
    >>> mList[0][1].GetAtomIds()
    (3,)

    The first feature type, Donor, has a single match:
    >>> len(mList[1])
    1
    >>> mList[1][0].GetAtomIds()
    (3,)
    """
    featsByFamily = _getFeatDict(mol, featFactory, features)
    matchLists = []
    for feat in features:
        familyMatches = featsByFamily.get(feat.GetFamily(), [])
        if not familyMatches:
            # one unmatched feature means the whole pharmacophore fails
            return False, None
        matchLists.append(familyMatches)
    return True, matchLists
def CombiEnum(sequence):
    """ This generator takes a sequence of sequences as an argument and
    provides all combinations of the elements of the subsequences
    (i.e. the cartesian product, yielded as lists):

    >>> gen = CombiEnum(((1,2),(10,20)))
    >>> next(gen)
    [1, 10]
    >>> next(gen)
    [1, 20]

    >>> [x for x in CombiEnum(((1,2),(10,20)))]
    [[1, 10], [1, 20], [2, 10], [2, 20]]

    >>> [x for x in CombiEnum(((1,2),(10,20),(100,200)))]
    [[1, 10, 100], [1, 10, 200], [1, 20, 100], [1, 20, 200], [2, 10, 100], [2, 10, 200], [2, 20, 100], [2, 20, 200]]
    """
    # local import keeps this self-contained; itertools.product replaces the
    # previous hand-rolled recursion (and yields [] once for an empty input,
    # matching the old behavior)
    import itertools
    for combo in itertools.product(*sequence):
        yield list(combo)
def DownsampleBoundsMatrix(bm, indices, maxThresh=4.0):
    """ removes rows from a bounds matrix that are
    that are greater than a threshold value away from a set of
    other points

    returns the modified bounds matrix (the input matrix is not touched)

    The goal of this function is to remove rows from the bounds matrix
    that correspond to atoms that are likely to be quite far from
    the pharmacophore we're interested in. Because the bounds smoothing
    we eventually have to do is N^3, this can be a big win

    >>> boundsMat = numpy.array([[0.0,3.0,4.0],[2.0,0.0,3.0],[2.0,2.0,0.0]])
    >>> bm = DownsampleBoundsMatrix(boundsMat,(0,),3.5)
    >>> bm.shape == (2, 2)
    True

    we don't touch the input matrix:
    >>> boundsMat.shape == (3, 3)
    True

    >>> print(', '.join(['%.3f'%x for x in bm[0]]))
    0.000, 3.000
    >>> print(', '.join(['%.3f'%x for x in bm[1]]))
    2.000, 0.000

    if the threshold is high enough, we don't do anything:
    >>> boundsMat = numpy.array([[0.0,4.0,3.0],[2.0,0.0,3.0],[2.0,2.0,0.0]])
    >>> bm = DownsampleBoundsMatrix(boundsMat,(0,),5.0)
    >>> bm.shape == (3, 3)
    True

    If there's a max value that's close enough to *any* of the indices
    we pass in, we'll keep it:
    >>> boundsMat = numpy.array([[0.0,4.0,3.0],[2.0,0.0,3.0],[2.0,2.0,0.0]])
    >>> bm = DownsampleBoundsMatrix(boundsMat,(0,1),3.5)
    >>> bm.shape == (3, 3)
    True
    """
    nPts = bm.shape[0]
    # BUGFIX: numpy.int0 and numpy.float were deprecated aliases that have
    # been removed from modern numpy; use numpy.intp / numpy.float64.
    k = numpy.zeros(nPts, numpy.intp)
    for idx in indices:
        k[idx] = 1
    for i in indices:
        row = bm[i]
        # only look above the diagonal: row[j] (j>i) holds the upper bound
        for j in range(i + 1, nPts):
            if not k[j] and row[j] < maxThresh:
                k[j] = 1
    keep = numpy.nonzero(k)[0]
    # extract the kept rows and columns in one vectorized step (copies,
    # so the input matrix is left untouched)
    return bm[numpy.ix_(keep, keep)].astype(numpy.float64)
def CoarseScreenPharmacophore(atomMatch, bounds, pcophore, verbose=False):
    """Quick feasibility screen: for every pair of single-atom feature matches,
    check that the molecule's distance bounds window overlaps the
    pharmacophore's window. Returns False as soon as one pair fails.

    >>> feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1', Geometry.Point3D(0.0, 0.0, 0.0)),
    ...  ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),
    ...  ChemicalFeatures.FreeChemicalFeature('Aromatic', 'Aromatic1', Geometry.Point3D(5.12, 0.908, 0.0)),
    ...  ]
    >>> pcophore=Pharmacophore.Pharmacophore(feats)
    >>> pcophore.setLowerBound(0,1, 1.1)
    >>> pcophore.setUpperBound(0,1, 1.9)
    >>> pcophore.setLowerBound(0,2, 2.1)
    >>> pcophore.setUpperBound(0,2, 2.9)
    >>> pcophore.setLowerBound(1,2, 2.1)
    >>> pcophore.setUpperBound(1,2, 3.9)
    >>> bounds = numpy.array([[0,2,3],[1,0,4],[2,3,0]],numpy.float64)
    >>> CoarseScreenPharmacophore(((0,),(1,)),bounds,pcophore)
    True
    >>> CoarseScreenPharmacophore(((0,),(2,)),bounds,pcophore)
    False
    >>> CoarseScreenPharmacophore(((1,),(2,)),bounds,pcophore)
    False
    >>> CoarseScreenPharmacophore(((0,),(1,),(2,)),bounds,pcophore)
    True
    >>> CoarseScreenPharmacophore(((1,),(0,),(2,)),bounds,pcophore)
    False
    >>> CoarseScreenPharmacophore(((2,),(1,),(0,)),bounds,pcophore)
    False
    """
    nFeats = len(atomMatch)
    for k in range(nFeats):
        # pairs where either feature matched more than one atom are skipped
        if len(atomMatch[k]) != 1:
            continue
        for l in range(k + 1, nFeats):
            if len(atomMatch[l]) != 1:
                continue
            lo, hi = sorted((atomMatch[k][0], atomMatch[l][0]))
            # lower bounds sit below the diagonal, upper bounds above it
            upperOk = bounds[hi, lo] < pcophore.getUpperBound(k, l)
            lowerOk = bounds[lo, hi] > pcophore.getLowerBound(k, l)
            if upperOk and lowerOk:
                continue
            if verbose:
                print('\t (%d,%d) [%d,%d] fail' % (hi, lo, k, l))
                print('\t %f,%f - %f,%f' %
                      (bounds[hi, lo], pcophore.getUpperBound(k, l),
                       bounds[lo, hi], pcophore.getLowerBound(k, l)))
            return False
    return True
def Check2DBounds(atomMatch, mol, pcophore):
    """ checks to see if a particular mapping of features onto
    a molecule satisfies a pharmacophore's 2D restrictions

    >>> activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor', Geometry.Point3D(0.0, 0.0, 0.0)),
    ...  ChemicalFeatures.FreeChemicalFeature('Donor',Geometry.Point3D(0.0, 0.0, 0.0))]
    >>> pcophore= Pharmacophore.Pharmacophore(activeFeats)
    >>> pcophore.setUpperBound2D(0,1,3)
    >>> m = Chem.MolFromSmiles('FCC(N)CN')
    >>> Check2DBounds(((0,),(3,)),m,pcophore)
    True
    >>> Check2DBounds(((0,),(5,)),m,pcophore)
    False
    """
    # topological (bond-count) distance matrix
    dm = Chem.GetDistanceMatrix(mol, False, False, False)
    nFeats = len(atomMatch)
    for i in range(nFeats):
        for j in range(i + 1, nFeats):
            # lower bounds below the diagonal, upper bounds above it
            lowerB = pcophore._boundsMat2D[j, i]  # lowerB = pcophore.getLowerBound2D(i,j)
            upperB = pcophore._boundsMat2D[i, j]  # upperB = pcophore.getUpperBound2D(i,j)
            # the shortest topological distance between any pair of matched atoms
            dij = 10000
            for atomI in atomMatch[i]:
                for atomJ in atomMatch[j]:
                    try:
                        dij = min(dij, dm[atomI, atomJ])
                    except IndexError:
                        print('bad indices:', atomI, atomJ)
                        print('  shape:', dm.shape)
                        print('  match:', atomMatch)
                        print('    mol:')
                        print(Chem.MolToMolBlock(mol))
                        # BUGFIX: re-raise the original exception (with its
                        # traceback) instead of raising a fresh, empty IndexError
                        raise
            if dij < lowerB or dij > upperB:
                return False
    return True
def _checkMatch(match, mol, bounds, pcophore, use2DLimits):
    """ **INTERNAL USE ONLY**

    checks whether a particular atom match can be satisfied by
    a molecule; returns the atom match on success, None otherwise.
    """
    atomMatch = ChemicalFeatures.GetAtomMatch(match)
    if not atomMatch:
        return None
    # optional topological screen before the (3D) coarse screen
    if use2DLimits and not Check2DBounds(atomMatch, mol, pcophore):
        return None
    if not CoarseScreenPharmacophore(atomMatch, bounds, pcophore):
        return None
    return atomMatch
def ConstrainedEnum(matches, mol, pcophore, bounds, use2DLimits=False,
                    index=0, soFar=None):
    """ Enumerates the list of atom mappings a molecule
    has to a particular pharmacophore.
    We do check distance bounds here.

    Yields (featureMatchList, atomMatch) pairs for partial combinations
    that pass _checkMatch screening.
    """
    # BUGFIX: the default used to be the mutable `soFar=[]`
    if soFar is None:
        soFar = []
    nMatches = len(matches)
    if index >= nMatches:
        yield soFar, []
    elif index == nMatches - 1:
        # last level: screen and yield complete combinations
        for entry in matches[index]:
            nextStep = soFar + [entry]
            if index != 0:
                atomMatch = _checkMatch(nextStep, mol, bounds, pcophore, use2DLimits)
            else:
                # single-feature pharmacophore: nothing pairwise to screen
                atomMatch = ChemicalFeatures.GetAtomMatch(nextStep)
            if atomMatch:
                yield soFar + [entry], atomMatch
    else:
        # interior level: prune failing partial combinations early, recurse
        for entry in matches[index]:
            nextStep = soFar + [entry]
            if index != 0:
                atomMatch = _checkMatch(nextStep, mol, bounds, pcophore, use2DLimits)
                if not atomMatch:
                    continue
            for val in ConstrainedEnum(matches, mol, pcophore, bounds,
                                       use2DLimits=use2DLimits,
                                       index=index + 1, soFar=nextStep):
                if val:
                    yield val
def MatchPharmacophore(matches, bounds, pcophore, useDownsampling=False,
                       use2DLimits=False, mol=None, excludedVolumes=None,
                       useDirs=False):
    """
    if use2DLimits is set, the molecule must also be provided and topological
    distances will also be used to filter out matches

    Walks the constrained enumeration of feature matches and returns the
    first one whose adjusted bounds matrix survives triangle smoothing.

    Returns a 4-tuple:
      (0, smoothedBoundsMatrix, match, (sizeBeforeDownsampling, sizeAfter))
    on success, or (1, None, None, None) when no match works.
    """
    for match, atomMatch in ConstrainedEnum(matches, mol, pcophore, bounds,
                                            use2DLimits=use2DLimits):
        # work on a copy: each candidate gets a fresh bounds matrix
        bm = bounds.copy()
        bm = UpdatePharmacophoreBounds(bm, atomMatch, pcophore, useDirs=useDirs, mol=mol);
        if excludedVolumes:
            # rebind each excluded volume's feature info to this match's atoms
            localEvs = []
            for eV in excludedVolumes:
                featInfo = []
                for i, entry in enumerate(atomMatch):
                    info = list(eV.featInfo[i])
                    info[0] = entry
                    featInfo.append(info)
                localEvs.append(ExcludedVolume.ExcludedVolume(featInfo, eV.index,
                                                              eV.exclusionDist))
            bm = AddExcludedVolumes(bm, localEvs, smoothIt=False)
        sz = bm.shape[0]
        if useDownsampling:
            indices = []
            for entry in atomMatch:
                indices.extend(entry)
            if excludedVolumes:
                # NOTE: localEvs is only bound when excludedVolumes was set above
                for vol in localEvs:
                    indices.append(vol.index)
            bm = DownsampleBoundsMatrix(bm, indices)
        if DG.DoTriangleSmoothing(bm):
            # first embeddable candidate wins
            return 0, bm, match, (sz, bm.shape[0])

    return 1, None, None, None
def GetAllPharmacophoreMatches(matches, bounds, pcophore, useDownsampling=0,
                               progressCallback=None,
                               use2DLimits=False, mol=None,
                               verbose=False):
    """Enumerate every combination of feature matches (via CombiEnum) and
    return the list of combinations whose bounds matrix survives the 2D
    screen (optional), the coarse screen, and triangle smoothing.

    progressCallback, if provided, is called with the running count of
    combinations processed after each one.
    """
    res = []
    nDone = 0
    for match in CombiEnum(matches):
        atomMatch = ChemicalFeatures.GetAtomMatch(match)
        if atomMatch and use2DLimits and mol:
            pass2D = Check2DBounds(atomMatch, mol, pcophore)
            if verbose:
                print('..', atomMatch)
                print(' ..Pass2d:', pass2D)
        else:
            # no 2D screen requested (or nothing to screen)
            pass2D = True
        if atomMatch and pass2D and \
           CoarseScreenPharmacophore(atomMatch, bounds, pcophore, verbose=verbose):
            if verbose:
                print(' ..CoarseScreen: Pass')

            # candidate survives the cheap screens; try smoothing a copy of
            # the bounds matrix adjusted for this pharmacophore mapping
            bm = bounds.copy()
            if verbose:
                print('pre update:')
                for row in bm:
                    print(' ', ' '.join(['% 4.2f' % x for x in row]))
            bm = UpdatePharmacophoreBounds(bm, atomMatch, pcophore);
            sz = bm.shape[0]
            if verbose:
                print('pre downsample:')
                for row in bm:
                    print(' ', ' '.join(['% 4.2f' % x for x in row]))

            if useDownsampling:
                indices = []
                for entry in atomMatch:
                    indices += list(entry)
                bm = DownsampleBoundsMatrix(bm, indices)
            if verbose:
                print('post downsample:')
                for row in bm:
                    print(' ', ' '.join(['% 4.2f' % x for x in row]))

            if DG.DoTriangleSmoothing(bm):
                res.append(match)
            elif verbose:
                print('cannot smooth')
        nDone += 1
        if progressCallback:
            progressCallback(nDone)
    return res
def ComputeChiralVolume(mol, centerIdx, confId=-1):
    """ Computes the chiral volume of an atom

    We're using the chiral volume formula from Figure 7 of
    Blaney and Dixon, Rev. Comp. Chem. V, 299-335 (1994)

    >>> import os.path
    >>> dataDir = os.path.join(RDConfig.RDCodeDir,'Chem/Pharm3D/test_data')

    R configuration atoms give negative volumes:
    >>> mol = Chem.MolFromMolFile(os.path.join(dataDir,'mol-r.mol'))
    >>> Chem.AssignStereochemistry(mol)
    >>> mol.GetAtomWithIdx(1).GetProp('_CIPCode')
    'R'
    >>> ComputeChiralVolume(mol,1) < 0
    True

    S configuration atoms give positive volumes:
    >>> mol = Chem.MolFromMolFile(os.path.join(dataDir,'mol-s.mol'))
    >>> Chem.AssignStereochemistry(mol)
    >>> mol.GetAtomWithIdx(1).GetProp('_CIPCode')
    'S'
    >>> ComputeChiralVolume(mol,1) > 0
    True

    Non-chiral (or non-specified) atoms give zero volume:
    >>> ComputeChiralVolume(mol,0) == 0.0
    True

    We also work on 3-coordinate atoms (with implicit Hs):
    >>> mol = Chem.MolFromMolFile(os.path.join(dataDir,'mol-r-3.mol'))
    >>> Chem.AssignStereochemistry(mol)
    >>> mol.GetAtomWithIdx(1).GetProp('_CIPCode')
    'R'
    >>> ComputeChiralVolume(mol,1)<0
    True

    >>> mol = Chem.MolFromMolFile(os.path.join(dataDir,'mol-s-3.mol'))
    >>> Chem.AssignStereochemistry(mol)
    >>> mol.GetAtomWithIdx(1).GetProp('_CIPCode')
    'S'
    >>> ComputeChiralVolume(mol,1)>0
    True
    """
    conf = mol.GetConformer(confId)
    Chem.AssignStereochemistry(mol)
    center = mol.GetAtomWithIdx(centerIdx)
    if not center.HasProp('_CIPCode'):
        # unspecified stereochemistry -> zero volume
        return 0.0
    ranked = [(int(neighbor.GetProp('_CIPRank')),
               conf.GetAtomPosition(neighbor.GetIdx()))
              for neighbor in center.GetNeighbors()]
    # if we only have three neighbors (i.e. the determining H isn't present)
    # then use the central atom as the fourth point:
    if len(ranked) == 3:
        ranked.append((-1, conf.GetAtomPosition(centerIdx)))
    ranked.sort()
    pts = [pos for _, pos in ranked]
    v1 = pts[0] - pts[3]
    v2 = pts[1] - pts[3]
    v3 = pts[2] - pts[3]
    # signed volume: triple product of the edge vectors
    return v1.DotProduct(v2.CrossProduct(v3))
#------------------------------------
#
# doctest boilerplate
#
def _test():
    """Run this module's doctests; returns doctest's (failed, attempted) result."""
    import doctest
    import sys
    return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
    # exit status is the number of failing doctests (0 == success)
    import sys
    failed, tried = _test()
    sys.exit(failed)
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import codecs
import copy
import logging
import os
import sys
import uuid
from datetime import datetime
from enum import Enum
import googleapiclient.discovery
from future.utils import iteritems
from google.cloud import bigquery
from google.cloud.bigquery.job import (CopyJobConfig, CreateDisposition, QueryJobConfig,
WriteDisposition)
from google.cloud.exceptions import NotFound
from google.oauth2 import service_account
from bqdm.model.schema import BigQuerySchemaField
from bqdm.model.table import BigQueryTable
from bqdm.util import (dump, echo, echo_dump, echo_ndiff)
# Module-level logger; a stdout handler is attached so progress messages are
# visible in CLI use. Default level is INFO (raised to DEBUG in TableAction
# when debug=True).
_logger = logging.getLogger(__name__)
_logger.addHandler(logging.StreamHandler(sys.stdout))
_logger.setLevel(logging.INFO)
class SchemaMigrationMode(Enum):
    """Strategy used by TableAction.migrate to move a table to a new schema.

    The *_BACKUP variants first copy the table into the backup dataset
    (see TableAction.backup) before any destructive step.
    """
    SELECT_INSERT = 'select_insert'
    SELECT_INSERT_BACKUP = 'select_insert_backup'
    REPLACE = 'replace'
    REPLACE_BACKUP = 'replace_backup'
    DROP_CREATE = 'drop_create'
    DROP_CREATE_BACKUP = 'drop_create_backup'
class TableAction(object):
def __init__(self, executor, dataset_id,
             migration_mode=None, backup_dataset_id=None, project=None,
             credential_file=None, no_color=False, debug=False):
    """Set up the BigQuery clients and dataset references for this action.

    executor: a concurrent.futures-style executor used for parallel work.
    When backup_dataset_id is omitted, backups go to the main dataset.
    When migration_mode is omitted, SELECT_INSERT is used.
    """
    self._executor = executor
    if credential_file:
        credentials = service_account.Credentials.from_service_account_file(credential_file)
    else:
        credentials = None
    self._client = bigquery.Client(project, credentials)
    # a discovery-based client is also needed for the tables().patch call
    self._api_client = googleapiclient.discovery.build('bigquery', 'v2',
                                                       credentials=credentials)
    self._dataset_ref = self._client.dataset(dataset_id)
    self._backup_dataset_ref = (self._client.dataset(backup_dataset_id)
                                if backup_dataset_id else self._dataset_ref)
    self._migration_mode = (SchemaMigrationMode(migration_mode)
                            if migration_mode
                            else SchemaMigrationMode.SELECT_INSERT)
    self.no_color = no_color
    if debug:
        _logger.setLevel(logging.DEBUG)
@property
def dataset_reference(self):
    """The dataset reference this action operates on."""
    return self._dataset_ref
@property
def dataset(self):
    """Fetch the Dataset from BigQuery (an API call on every access)."""
    return self._client.get_dataset(self._dataset_ref)
@property
def exists_dataset(self):
    """True when the target dataset can be fetched from BigQuery."""
    try:
        self._client.get_dataset(self._dataset_ref)
    except NotFound:
        return False
    return True
@property
def backup_dataset_reference(self):
    """The dataset reference backups are written to (may equal dataset_reference)."""
    return self._backup_dataset_ref
@property
def backup_dataset(self):
    """Fetch the backup Dataset from BigQuery (an API call on every access)."""
    return self._client.get_dataset(self._backup_dataset_ref)
@property
def exists_backup_dataset(self):
    """True when the backup dataset can be fetched from BigQuery."""
    try:
        self._client.get_dataset(self._backup_dataset_ref)
    except NotFound:
        return False
    return True
@property
def migration_mode(self):
    """The SchemaMigrationMode this action was configured with."""
    return self._migration_mode
@staticmethod
def get_add_tables(source, target):
    """Tables present in *target* but not in *source* (by table_id).

    Returns (count, tuple_of_models), preserving target order.
    """
    source_ids = set(s.table_id for s in source)
    added = tuple(t for t in target if t.table_id not in source_ids)
    return len(added), added
@staticmethod
def get_change_tables(source, target):
    """Tables that exist on both sides but whose definition differs.

    Returns (count, tuple_of_models); ordering follows set iteration.
    """
    _, added = TableAction.get_add_tables(source, target)
    changed = set(target).difference(added).difference(source)
    return len(changed), tuple(changed)
@staticmethod
def get_destroy_tables(source, target):
    """Tables present in *source* but not in *target* (by table_id).

    Returns (count, tuple_of_models), preserving source order.
    """
    target_ids = set(t.table_id for t in target)
    destroyed = tuple(s for s in source if s.table_id not in target_ids)
    return len(destroyed), destroyed
def migrate(self, source_table, target_table, prefix=' ', fg='yellow'):
    """Move the existing table (source_table) to the desired definition
    (target_table) using the configured migration mode.

    *_BACKUP modes snapshot the table first; SELECT_INSERT rewrites the
    table in place; REPLACE stages data through a temporary table around a
    drop/create; DROP_CREATE simply drops and recreates (data is lost).
    """
    # snapshot before any destructive step for the *_BACKUP modes
    if self._migration_mode in [SchemaMigrationMode.SELECT_INSERT_BACKUP,
                                SchemaMigrationMode.REPLACE_BACKUP,
                                SchemaMigrationMode.DROP_CREATE_BACKUP]:
        self.backup(source_table.table_id)
    if self._migration_mode in [SchemaMigrationMode.SELECT_INSERT,
                                SchemaMigrationMode.SELECT_INSERT_BACKUP]:
        # rewrite in place: SELECT the casted fields and WRITE_TRUNCATE back
        # into the same table id
        query_field = TableAction.build_query_field(source_table.schema, target_table.schema)
        self.select_insert(target_table.table_id, target_table.table_id, query_field)
    elif self._migration_mode in [SchemaMigrationMode.REPLACE,
                                  SchemaMigrationMode.REPLACE_BACKUP]:
        # stage the data in a temp table, drop/recreate the real table with
        # the new definition, then copy the data back and drop the temp
        tmp_table = self.create_temporary_table(target_table)
        query_field = TableAction.build_query_field(source_table.schema, target_table.schema)
        self.select_insert(source_table.table_id, tmp_table.table_id, query_field)
        self._destroy(target_table, prefix, fg)
        self._add(target_table, prefix, fg)
        query_field = TableAction.build_query_field(target_table.schema, target_table.schema)
        self.select_insert(tmp_table.table_id, target_table.table_id, query_field)
        self._destroy(tmp_table, prefix, fg)
    elif self._migration_mode in [SchemaMigrationMode.DROP_CREATE,
                                  SchemaMigrationMode.DROP_CREATE_BACKUP]:
        # recreate without preserving data
        self._destroy(target_table, prefix, fg)
        self._add(target_table, prefix, fg)
    else:
        raise ValueError('Unknown migration mode.')
def backup(self, source_table_id, prefix=' ', fg='yellow'):
    """Copy a table into the backup dataset under a timestamped name.

    Blocks until the copy job finishes; raises RuntimeError on job errors.
    """
    src = self.dataset.table(source_table_id)
    dst_id = 'backup_{source_table_id}_{timestamp}'.format(
        source_table_id=source_table_id,
        timestamp=datetime.utcnow().strftime('%Y%m%d%H%M%S%f'))
    dst = self.backup_dataset.table(dst_id)
    config = CopyJobConfig()
    config.create_disposition = CreateDisposition.CREATE_IF_NEEDED
    job = self._client.copy_table(src, dst, job_config=config)
    echo('Backing up... {0}'.format(job.job_id),
         prefix=prefix, fg=fg, no_color=self.no_color)
    job.result()
    assert job.state == 'DONE'
    if job.error_result:
        raise RuntimeError(job.errors)
def select_insert(self, source_table_id, destination_table_id, query_field,
                  prefix=' ', fg='yellow'):
    """SELECT *query_field* from the source table and WRITE_TRUNCATE the
    result into the destination table.

    Blocks until the query job finishes; raises RuntimeError on job errors.
    """
    sql = 'SELECT {query_field} FROM {dataset_id}.{source_table_id}'.format(
        query_field=query_field,
        dataset_id=self._dataset_ref.dataset_id,
        source_table_id=source_table_id)
    config = QueryJobConfig()
    config.use_legacy_sql = False
    config.use_query_cache = False
    config.write_disposition = WriteDisposition.WRITE_TRUNCATE
    config.destination = self.dataset.table(destination_table_id)
    job = self._client.query(sql, config)
    echo('Inserting... {0}'.format(job.job_id),
         prefix=prefix, fg=fg, no_color=self.no_color)
    echo(' {0}'.format(job.query),
         prefix=prefix, fg=fg, no_color=self.no_color)
    job.result()
    assert job.state == 'DONE'
    if job.error_result:
        raise RuntimeError(job.errors)
@staticmethod
def build_query_field(source_schema, target_schema, prefix=None):
    """Build the SELECT field list that maps source_schema onto target_schema.

    For each target field:
      - RECORD fields are rebuilt with struct(...), recursing over sub-fields
        (an empty source sub-schema makes every sub-field null);
      - fields present in the source are cast to the target type;
      - fields absent from the source are selected as null.

    prefix, when given, qualifies field names (e.g. 'parent.child').
    Returns a comma-separated string of select expressions.
    """
    query_fields = []
    for target in target_schema:
        source = next((s for s in source_schema if s.name == target.name), None)
        if target.field_type == 'RECORD':
            # this path was previously duplicated across the source-present
            # and source-missing branches; recursing with an empty source
            # schema produces the same nulls as the old missing-source branch
            field_prefix = '{0}.{1}'.format(
                prefix, target.name) if prefix else target.name
            sub_source = source.fields if source else ()
            fields = TableAction.build_query_field(sub_source, target.fields,
                                                   field_prefix)
            query_fields.append('struct({fields}) AS {alias}'.format(
                fields=fields, alias=target.name))
        elif source:
            name = '{0}.{1}'.format(prefix, target.name) if prefix else target.name
            field_type = BigQuerySchemaField.normalize_field_type(target.field_type)
            query_fields.append('cast({name} AS {type}) AS {alias}'.format(
                name=name, type=field_type, alias=target.name))
        else:
            query_fields.append('null AS {alias}'.format(alias=target.name))
    return ', '.join(query_fields)
def update_schema_description(self, target_table):
    """Patch the table's schema (e.g. field descriptions) via the REST API."""
    body = {
        'schema': target_table.schema_dict()
    }
    request = self._api_client.tables().patch(
        projectId=self._client.project,
        datasetId=self.dataset.dataset_id,
        tableId=target_table.table_id,
        body=body)
    request.execute()
def create_temporary_table(self, model):
    """Create a copy of *model* under a random table id; returns the new model."""
    tmp_model = copy.deepcopy(model)
    # random id; dashes replaced since they are not valid in table ids
    tmp_model.table_id = str(uuid.uuid4()).replace('-', '_')
    tmp_table = BigQueryTable.to_table(self._dataset_ref, tmp_model)
    echo(' Temporary table creating... {0}'.format(tmp_table.path),
         fg='yellow', no_color=self.no_color)
    self._client.create_table(tmp_table)
    return tmp_model
def get_table(self, table_id):
    """Load a table as a BigQueryTable model; returns None when it doesn't exist."""
    ref = self.dataset.table(table_id)
    result = None
    try:
        result = self._client.get_table(ref)
        echo('Load table: ' + result.path)
        result = BigQueryTable.from_table(result)
    except NotFound:
        _logger.info('Table {0} is not found.'.format(table_id))
    return result
def _list_tables(self):
    # Thin wrapper over the client; returns the client's table iterator.
    return self._client.list_tables(self.dataset)
def list_tables(self):
    """Submit get_table for every table in the dataset.

    Returns a list of futures (empty list when the dataset doesn't exist).
    """
    if not self.exists_dataset:
        return []
    # consistency: go through the _list_tables helper like export() does,
    # instead of duplicating the client call
    return [self._executor.submit(self.get_table, t.table_id)
            for t in self._list_tables()]
def _export(self, output_dir, table_id):
    """Dump one table's config to <output_dir>/<table_id>.yml; returns the model."""
    table = self.get_table(table_id)
    data = dump(table)
    _logger.debug(data)
    export_path = os.path.join(output_dir, '{0}.yml'.format(table.table_id))
    echo('Export table config: {0}'.format(export_path))
    with codecs.open(export_path, 'wb', 'utf-8') as f:
        f.write(data)
    return table
def export(self, output_dir):
    """Export every table's config into <output_dir>/<dataset_id>/.

    Writes a .gitkeep file when the dataset has no tables, so the
    directory survives in version control. Returns a list of futures.
    """
    output_dir = os.path.join(output_dir, self._dataset_ref.dataset_id)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # BUGFIX: the client returns a lazy iterator. The old code consumed it
    # in the comprehension and then tested `if not tables:` on the iterator
    # object, which is always truthy — so .gitkeep was never written.
    tables = list(self._list_tables())
    fs = [self._executor.submit(self._export, output_dir, table.table_id)
          for table in tables]
    if not tables:
        keep_file = os.path.join(output_dir, '.gitkeep')
        if not os.path.exists(keep_file):
            open(keep_file, 'a').close()
    return fs
def _add(self, model, prefix=' ', fg='green'):
table = BigQueryTable.to_table(self._dataset_ref, model)
echo('Adding... {0}'.format(table.path),
prefix=prefix, fg=fg, no_color=self.no_color)
echo_dump(model, prefix=prefix + ' ', fg=fg, no_color=self.no_color)
self._client.create_table(table)
echo()
def plan_add(self, source, target, prefix=' ', fg='green'):
count, tables = self.get_add_tables(source, target)
_logger.debug('Add tables: {0}'.format(tables))
for table in tables:
echo('+ {0}'.format(table.table_id),
prefix=prefix, fg=fg, no_color=self.no_color)
echo_dump(table, prefix=prefix + ' ', fg=fg, no_color=self.no_color)
echo()
return count
def add(self, source, target, prefix=' ', fg='yellow'):
count, tables = self.get_add_tables(source, target)
_logger.debug('Add tables: {0}'.format(tables))
fs = [self._executor.submit(self._add, t, prefix, fg) for t in tables]
return count, fs
    def _change(self, source_model, target_model, prefix='    ', fg='yellow'):
        """Mutate the live table so it matches *target_model*.

        NOTE(review): assumes ``source_model`` describes the table's current
        state and ``target_model`` the desired one -- confirm with callers.
        Structural schema changes (or a partitioning change) go through
        ``self.migrate``; description-only changes are patched in place.
        """
        table = BigQueryTable.to_table(self._dataset_ref, target_model)
        echo('Changing... {0}'.format(table.path),
             prefix=prefix, fg=fg, no_color=self.no_color)
        echo_ndiff(source_model, target_model, prefix=prefix + '    ', fg=fg)
        source_labels = source_model.labels
        if source_labels:
            # Label keys present on the current table but absent from the
            # target are set to None -- presumably so the update deletes them
            # server-side; verify against the client library's semantics.
            labels = table.labels.copy()
            for k, v in iteritems(source_labels):
                if k not in labels.keys():
                    labels[k] = None
            table.labels = labels
        if target_model.partitioning_type != source_model.partitioning_type:
            # A partitioning change cannot be carried out by the
            # SELECT-INSERT style migration modes.
            assert self._migration_mode not in [
                SchemaMigrationMode.SELECT_INSERT,
                SchemaMigrationMode.SELECT_INSERT_BACKUP],\
                'Migration mode: `{0}` not supported.'.format(self._migration_mode.value)
        target_schema_exclude_description = target_model.schema_exclude_description()
        source_schema_exclude_description = source_model.schema_exclude_description()
        # Structural difference (ignoring descriptions) or repartitioning
        # forces a migration of the data.
        if target_schema_exclude_description != source_schema_exclude_description or \
                target_model.partitioning_type != source_model.partitioning_type:
            self.migrate(source_model, target_model)
        # Identical structure but differing schema => only descriptions
        # changed; patch them without touching the data.
        if target_schema_exclude_description == source_schema_exclude_description and \
                target_model.schema != source_model.schema:
            self.update_schema_description(target_model)
        self._client.update_table(table, [
            'friendly_name',
            'description',
            'expires',
            'view_use_legacy_sql',
            'view_query',
            'labels',
        ])
        echo()
def plan_change(self, source, target, prefix=' ', fg='yellow'):
count, tables = self.get_change_tables(source, target)
_logger.debug('Change tables: {0}'.format(tables))
for table in tables:
echo('~ {0}'.format(table.table_id),
prefix=prefix, fg=fg, no_color=self.no_color)
source_table = next((s for s in source if s.table_id == table.table_id), None)
echo_ndiff(source_table, table, prefix=prefix + ' ', fg=fg)
echo()
return count
def change(self, source, target, prefix=' ', fg='yellow'):
count, tables = self.get_change_tables(source, target)
_logger.debug('Change tables: {0}'.format(tables))
fs = [self._executor.submit(
self._change, next((s for s in source if s.table_id == t.table_id), None),
t, prefix, fg) for t in tables]
return count, fs
def _destroy(self, model, prefix=' ', fg='red'):
table = BigQueryTable.to_table(self._dataset_ref, model)
echo('Destroying... {0}'.format(table.path),
prefix=prefix, fg=fg, no_color=self.no_color)
self._client.delete_table(table)
echo()
def plan_destroy(self, source, target, prefix=' ', fg='red'):
count, tables = self.get_destroy_tables(source, target)
_logger.debug('Destroy tables: {0}'.format(tables))
for table in tables:
echo('- {0}'.format(table.table_id),
prefix=prefix, fg=fg, no_color=self.no_color)
echo()
return count
def destroy(self, source, target, prefix=' ', fg='red'):
count, tables = self.get_destroy_tables(source, target)
_logger.debug('Destroy tables: {0}'.format(tables))
fs = [self._executor.submit(self._destroy, t, prefix, fg) for t in tables]
return count, fs
|
|
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plot is a simple way.
This is the BoxPlot class which lets you build your BoxPlot plots just passing
the arguments to the Chart class and calling the proper functions.
It also add a new chained stacked method.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENCE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import numpy as np
import pandas as pd
from ._chartobject import ChartObject
from ..objects import ColumnDataSource, FactorRange, Range1d
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class BoxPlot(ChartObject):
    """This is the BoxPlot class and it is in charge of plotting
    scatter plots in an easy and intuitive way.
    Essentially, we provide a way to ingest the data, make the proper
    calculations and push the references into a source object.
    We additionally make calculations for the ranges.
    And finally add the needed glyphs (rects, lines and markers)
    taking the references from the source.
    Examples:
        from collections import OrderedDict
        import numpy as np
        from bokeh.charts import BoxPlot
        from bokeh.sampledata.olympics2014 import data
        data = {d['abbr']: d['medals'] for d in data['data'] if d['medals']['total'] > 0}
        countries = sorted(data.keys(), key=lambda x: data[x]['total'], reverse=True)
        gold = np.array([data[abbr]['gold'] for abbr in countries], dtype=np.float)
        silver = np.array([data[abbr]['silver'] for abbr in countries], dtype=np.float)
        bronze = np.array([data[abbr]['bronze'] for abbr in countries], dtype=np.float)
        medals = OrderedDict(bronze=bronze, silver=silver, gold=gold)
        boxplot = BoxPlot(medals, marker="circle", outliers=True,
                          title="boxplot, dict_input", xlabel="medal type", ylabel="medal count",
                          width=800, height=600, notebook=True)
        boxplot.show()
    """
    def __init__(self, value, marker="circle", outliers=True,
                 title=None, xlabel=None, ylabel=None, legend=False,
                 xscale="categorical", yscale="linear", width=800, height=600,
                 tools=True, filename=False, server=False, notebook=False):
        """ Initialize a new BoxPlot.
        Args:
            value (DataFrame or OrderedDict/dict): containing the data with names as a key
                and the data as a value.
            marker (int or string, optional): if outliers=True, the marker type to use
                e.g., `circle`.
            outliers (bool, optional): Whether or not to plot outliers.
            title (str, optional): the title of your plot. Defaults to None.
            xlabel (str, optional): the x-axis label of your plot.
                Defaults to None.
            ylabel (str, optional): the y-axis label of your plot.
                Defaults to None.
            legend (str, optional): the legend of your plot. The legend content is
                inferred from incoming input.It can be ``top_left``,
                ``top_right``, ``bottom_left``, ``bottom_right``.
                It is ``top_right`` is you set it as True.
                Defaults to None.
            xscale (str, optional): the x-axis type scale of your plot. It can be
                ``linear``, ``datetime`` or ``categorical``.
                Defaults to ``linear``.
            yscale (str, optional): the y-axis type scale of your plot. It can be
                ``linear``, ``date`` or ``categorical``.
                Defaults to ``linear``.
            width (int, optional): the width of your plot in pixels.
                Defaults to 800.
            height (int, optional): the height of you plot in pixels.
                Defaults to 600.
            tools (bool, optional): to enable or disable the tools in your plot.
                Defaults to True
            filename (str or bool, optional): the name of the file where your plot.
                will be written. If you pass True to this argument, it will use
                ``untitled`` as a filename.
                Defaults to False.
            server (str or bool, optional): the name of your plot in the server.
                If you pass True to this argument, it will use ``untitled``
                as the name in the server.
                Defaults to False.
            notebook (bool, optional):if you want to output (or not) your plot into the
                IPython notebook.
                Defaults to False.
        Attributes:
            source (obj): datasource object for your plot,
                initialized as a dummy None.
            xdr (obj): x-associated datarange object for you plot,
                initialized as a dummy None.
            ydr (obj): y-associated datarange object for you plot,
                initialized as a dummy None.
            data (dict): to be filled with the incoming data and be passed
                to the ColumnDataSource in each chart inherited class.
                Needed for _set_And_get method.
            attr (list): to be filled with the new attributes created after
                loading the data dict.
                Needed for _set_And_get method.
        """
        self.value = value
        # Name-mangled stash of the constructor defaults; the chainable
        # marker()/outliers() methods write self._marker/self._outliers and
        # check_attr() falls back to these when chaining was not used.
        self.__marker = marker
        self.__outliers = outliers
        self.xdr = None
        self.ydr = None
        self.data_segment = dict()
        self.attr_segment = []
        self.data_rect = dict()
        self.attr_rect = []
        self.data_scatter = dict()
        self.attr_scatter = []
        self.data_legend = dict()
        super(BoxPlot, self).__init__(title, xlabel, ylabel, legend,
                                      xscale, yscale, width, height,
                                      tools, filename, server, notebook)
    def marker(self, marker="circle"):
        "marker (str, int): the marker type of your plot outliers."
        self._marker = marker
        return self
    def outliers(self, outliers=True):
        "outliers (bool): to show (or not) the outliers in each group of your plot."
        self._outliers = outliers
        return self
    def check_attr(self):
        """Check if any of the chained method were used.
        If they were not used, it assign the init parameters content by default.
        """
        super(BoxPlot, self).check_attr()
        if not hasattr(self, '_marker'):
            self._marker = self.__marker
        if not hasattr(self, '_outliers'):
            self._outliers = self.__outliers
    def get_data(self, marker, outliers, **value):
        """Take the BoxPlot data from the input **value.
        It calculates the chart properties accordingly. Then build a dict
        containing references to all the calculated points to be used by
        the quad, segments and markers glyphs inside the ``draw`` method.
        Args:
            cat (list): categories as a list of strings.
            marker (int or string, optional): if outliers=True, the marker type to use
                e.g., ``circle``.
            outliers (bool, optional): Whether to plot outliers.
            values (dict or pd obj): the values to be plotted as bars.
        """
        # assuming value is an OrderedDict
        self.value = value
        if isinstance(self.value, pd.DataFrame):
            self.groups = self.value.columns
        else:
            self.groups = list(self.value.keys())
        # NOTE(review): these assignments shadow the chainable marker()/
        # outliers() methods on the instance, so chaining no longer works
        # after get_data() has run -- confirm this is intended.
        self.marker = marker
        self.outliers = outliers
        # add group to the self.data_segment dict
        self.data_segment["groups"] = self.groups
        # add group and witdh to the self.data_rect dict
        self.data_rect["groups"] = self.groups
        self.data_rect["width"] = [0.8] * len(self.groups)
        # self.data_scatter does not need references to groups now,
        # they will be added later.
        # add group to the self.data_legend dict
        self.data_legend["groups"] = self.groups
        # all the list we are going to use to save calculated values
        q0_points = []
        q2_points = []
        iqr_centers = []
        iqr_lengths = []
        lower_points = []
        upper_points = []
        upper_center_boxes = []
        upper_height_boxes = []
        lower_center_boxes = []
        lower_height_boxes = []
        out_x, out_y, out_color = ([], [], [])
        self.palette = self._set_colors(self.groups)
        for i, level in enumerate(self.groups):
            # Compute quantiles, center points, heights, IQR, etc.
            # quantiles
            q = np.percentile(self.value[level], [25, 50, 75])
            q0_points.append(q[0])
            q2_points.append(q[2])
            # IQR related stuff...
            iqr_centers.append((q[2] + q[0]) / 2)
            iqr = q[2] - q[0]
            iqr_lengths.append(iqr)
            # NOTE(review): whiskers are centered on the median (q[1]) rather
            # than on Q1/Q3 as in Tukey's convention -- confirm intended.
            lower = q[1] - 1.5 * iqr
            upper = q[1] + 1.5 * iqr
            lower_points.append(lower)
            upper_points.append(upper)
            # rect center points and heights
            upper_center_boxes.append((q[2] + q[1]) / 2)
            upper_height_boxes.append(q[2] - q[1])
            lower_center_boxes.append((q[1] + q[0]) / 2)
            lower_height_boxes.append(q[1] - q[0])
            # Store indices of outliers as list (note: re-binds the
            # ``outliers`` parameter name to an index array)
            outliers = np.where((self.value[level] > upper) | (self.value[level] < lower))[0]
            out = self.value[level][outliers]
            for o in out:
                out_x.append(level)
                out_y.append(o)
                out_color.append(self.palette[i])
        # Store
        self._set_and_get(self.data_scatter, self.attr_scatter, "out_x", out_x)
        self._set_and_get(self.data_scatter, self.attr_scatter, "out_y", out_y)
        self._set_and_get(self.data_scatter, self.attr_scatter, "colors", out_color)
        self._set_and_get(self.data_segment, self.attr_segment, "q0", q0_points)
        self._set_and_get(self.data_segment, self.attr_segment, "lower", lower_points)
        self._set_and_get(self.data_segment, self.attr_segment, "q2", q2_points)
        self._set_and_get(self.data_segment, self.attr_segment, "upper", upper_points)
        self._set_and_get(self.data_rect, self.attr_rect, "iqr_centers", iqr_centers)
        self._set_and_get(self.data_rect, self.attr_rect, "iqr_lengths", iqr_lengths)
        self._set_and_get(self.data_rect, self.attr_rect, "upper_center_boxes", upper_center_boxes)
        self._set_and_get(self.data_rect, self.attr_rect, "upper_height_boxes", upper_height_boxes)
        self._set_and_get(self.data_rect, self.attr_rect, "lower_center_boxes", lower_center_boxes)
        self._set_and_get(self.data_rect, self.attr_rect, "lower_height_boxes", lower_height_boxes)
        self._set_and_get(self.data_rect, self.attr_rect, "colors", self.palette)
    def get_source(self):
        "Push the BoxPlot data into the ColumnDataSource and calculate the proper ranges."
        self.source_segment = ColumnDataSource(self.data_segment)
        self.source_scatter = ColumnDataSource(self.data_scatter)
        self.source_rect = ColumnDataSource(self.data_rect)
        self.source_legend = ColumnDataSource(self.data_legend)
        self.xdr = FactorRange(factors=self.source_segment.data["groups"])
        # attr_segment holds [q0, lower, q2, upper]; start from the whisker
        # extremes (lower/upper) as the initial y extent.
        start_y = min(self.data_segment[self.attr_segment[1]])
        end_y = max(self.data_segment[self.attr_segment[3]])
        ## Expand min/max to encompass outliers
        if self.outliers:
            # NOTE(review): min()/max() raise on an empty outlier list --
            # confirm this path is only taken when outliers exist.
            start_out_y = min(self.data_scatter[self.attr_scatter[1]])
            end_out_y = max(self.data_scatter[self.attr_scatter[1]])
            # it could be no outliers in some sides...
            start_y = min(start_y, start_out_y)
            end_y = max(end_y, end_out_y)
        self.ydr = Range1d(start=start_y - 0.1 * (end_y - start_y),
                           end=end_y + 0.1 * (end_y - start_y))
    def draw(self):
        """Use the several glyphs to display the Boxplot.
        It uses the selected marker glyph to display the points, segments to
        display the iqr and rects to display the boxes, taking as reference
        points the data loaded at the ColumnDataSurce.
        """
        # Whisker stems: q0 -> lower and q2 -> upper (attr_segment order is
        # [q0, lower, q2, upper]).
        self.chart.make_segment(self.source_segment, "groups", self.attr_segment[1],
                                "groups", self.attr_segment[0], "black", 2)
        self.chart.make_segment(self.source_segment, "groups", self.attr_segment[2],
                                "groups", self.attr_segment[3], "black", 2)
        # IQR box outline, then the two colored half-boxes split at the median.
        self.chart.make_rect(self.source_rect, "groups", self.attr_rect[0],
                             "width", self.attr_rect[1], None, "black", 2)
        self.chart.make_rect(self.source_rect, "groups", self.attr_rect[2],
                             "width", self.attr_rect[3], self.attr_rect[6], "black", None)
        self.chart.make_rect(self.source_rect, "groups", self.attr_rect[4],
                             "width", self.attr_rect[5], self.attr_rect[6], "black", None)
        if self.outliers:
            self.chart.make_scatter(self.source_scatter, self.attr_scatter[0],
                                    self.attr_scatter[1], self.marker, self.attr_scatter[2])
        # We build the legend here using dummy glyphs
        for i, level in enumerate(self.groups):
            self.chart.make_rect(self.source_legend, "groups", None, None, None,
                                 self.palette[i], "black", None)
        # We need to manually select the proper glyphs to be rendered as legends
        # NOTE(review): hard-coded indices assume the outlier scatter was
        # drawn and that exactly three groups exist -- verify for other inputs.
        indexes = [6, 7, 8] # 1st group, 2nd group, 3rd group
        self.chart.glyphs = [self.chart.glyphs[i] for i in indexes]
    def show(self):
        """Main BoxPlot show method.
        It essentially checks for chained methods, creates the chart,
        pass data into the plot object, draws the glyphs according
        to the data and shows the chart in the selected output.
        .. note:: the show method can not be chained. It has to be called
        at the end of the chain.
        """
        # we need to check the chained method attr
        self.check_attr()
        # we create the chart object
        self.create_chart()
        # we start the plot (adds axis, grids and tools)
        self.start_plot(xgrid=False)
        # we get the data from the incoming input
        self.get_data(self._marker, self._outliers, **self.value)
        # we filled the source and ranges with the calculated data
        self.get_source()
        # we dynamically inject the ranges into the plot
        self.add_data_plot(self.xdr, self.ydr)
        # we add the glyphs into the plot
        self.draw()
        # we pass info to build the legend
        self.end_plot(self.groups)
        # and finally we show it
        self.show_chart()
    # Some helper methods
    def _set_and_get(self, data, attr, val, content):
        """Set a new attr and then get it to fill the self.data dict.
        Keep track of the attributes created.
        Args:
            data (dict): where to store the new attribute content
            attr (list): where to store the new attribute names
            val (string): name of the new attribute
            content (obj): content of the new attribute
        """
        setattr(self, val, content)
        data[val] = getattr(self, val)
        attr.append(val)
|
|
from __future__ import print_function
import os
#import StringIO
import scipy.misc
import numpy as np
from glob import glob
from tqdm import trange
from itertools import chain
from collections import deque
from scipy.linalg import sqrtm
from numpy.linalg import norm
from models import *
from utils import save_image
# Guard the optional FID dependency; the original `ry:` was a syntax error
# (typo for `try:`), which made the whole module unimportable.
try:
    import fid
except ImportError:
    print("fid.py not found. Please download fid.py from the TTUR github repository.")
    raise SystemExit()
def next(loader):
    """Pull one batch from *loader* and return its first element as numpy.

    NOTE(review): shadows the builtin ``next`` and relies on the loader's
    Python-2-style ``.next()`` method; kept as-is for call-site compatibility.
    """
    first_item = loader.next()[0]
    return first_item.data.numpy()
def to_nhwc(image, data_format):
    """Return *image* in NHWC layout, converting only when it is NCHW."""
    if data_format == 'NCHW':
        return nchw_to_nhwc(image)
    return image
def to_nchw_numpy(image):
    """Transpose an NHWC array (1- or 3-channel last axis) to NCHW.

    Arrays whose trailing axis is not a channel count are passed through
    unchanged.
    """
    channels_last = image.shape[3] in (1, 3)
    return image.transpose([0, 3, 1, 2]) if channels_last else image
def norm_img(image, data_format=None):
    """Map pixel values from [0, 255] into [-1, 1]; optionally fix the layout."""
    normalized = image / 127.5 - 1.
    if data_format:
        normalized = to_nhwc(normalized, data_format)
    return normalized
def denorm_img(norm, data_format):
    """Inverse of norm_img: map [-1, 1] back to [0, 255] (clipped), in NHWC."""
    return tf.clip_by_value(to_nhwc((norm + 1)*127.5, data_format), 0, 255)
def slerp(val, low, high):
    """Spherical linear interpolation between vectors ``low`` and ``high``.

    Code from https://github.com/soumith/dcgan.torch/issues/14
    """
    unit_low = low / np.linalg.norm(low)
    unit_high = high / np.linalg.norm(high)
    omega = np.arccos(np.clip(np.dot(unit_low, unit_high), -1, 1))
    so = np.sin(omega)
    if so == 0:
        # Vectors are (anti)parallel: fall back to plain LERP (L'Hopital's rule).
        return (1.0 - val) * low + val * high
    return np.sin((1.0 - val) * omega) / so * low + np.sin(val * omega) / so * high
class Trainer(object):
    """BEGAN-style GAN trainer (TensorFlow 1.x graph mode).

    Builds generator/discriminator graphs with the k_t balance term, runs the
    training loop under a tf.train.Supervisor, and periodically evaluates FID
    against precomputed training-set statistics loaded from disk.
    """
    def __init__(self, config, data_loader):
        self.config = config
        self.data_loader = data_loader
        self.dataset = config.dataset
        self.train_stats_file = config.train_stats_file
        self.beta1 = config.beta1
        self.beta2 = config.beta2
        self.optimizer = config.optimizer
        self.batch_size = config.batch_size
        self.step = tf.Variable(0, name='step', trainable=False)
        self.g_lr = tf.Variable(config.g_lr, name='g_lr')
        self.d_lr = tf.Variable(config.d_lr, name='d_lr')
        # Learning-rate decay ops: each run halves the respective rate.
        self.g_lr_update = tf.assign(self.g_lr, self.g_lr * 0.5, name='g_lr_update')
        self.d_lr_update = tf.assign(self.d_lr, self.d_lr * 0.5, name='d_lr_update')
        self.gamma = config.gamma
        self.lambda_k = config.lambda_k
        self.z_num = config.z_num
        self.conv_hidden_num = config.conv_hidden_num
        self.input_scale_size = config.input_scale_size
        self.model_dir = config.model_dir
        self.load_checkpoint = config.load_checkpoint
        self.checkpoint_name = config.checkpoint_name
        self.use_gpu = config.use_gpu
        self.data_format = config.data_format
        _, height, width, self.channel = \
                get_conv_shape(self.data_loader, self.data_format)
        # Number of up/down-sampling stages derived from the image height.
        self.repeat_num = int(np.log2(height)) - 2
        self.start_step = config.start_step
        self.log_step = config.log_step
        self.max_step = config.max_step
        self.save_step = config.save_step
        self.lr_update_step = config.lr_update_step
        # TTS stuff
        self.update_k = config.update_k
        self.k_constant = config.k_constant
        #self.global_norm_thres = 100.0
        #self.clip_value_min = -0.1
        #self.clip_value_max = 0.1
        self.eval_num_samples = config.eval_num_samples
        self.eval_batch_size = config.eval_batch_size
        self.eval_step = config.eval_step
        self.output_height = config.input_scale_size
        self.output_width = self.output_height
        self.is_train = config.is_train
        self.build_model()
        self.saver = tf.train.Saver()
        self.summary_writer = tf.summary.FileWriter(self.model_dir)
        # The Supervisor owns session creation, periodic checkpointing
        # (every 3600s) and variable initialization.
        sv = tf.train.Supervisor(logdir=self.model_dir,
                                is_chief=True,
                                saver=self.saver,
                                summary_op=None,
                                summary_writer=self.summary_writer,
                                save_model_secs=3600,
                                global_step=self.step,
                                ready_for_local_init_op=None)
        gpu_options = tf.GPUOptions(allow_growth=True)
        sess_config = tf.ConfigProto(allow_soft_placement=True,
                                    gpu_options=gpu_options)
        self.sess = sv.prepare_or_wait_for_session(config=sess_config)
        # dirty way to bypass graph finilization error
        g = tf.get_default_graph()
        g._finalized = False
        if not self.is_train:
            self.build_test_model()
    def train(self):
        """Run the main optimization loop.

        Logs summaries, saves sample grids, decays learning rates, and every
        ``eval_step`` steps computes FID from generated samples against the
        precalculated training statistics.
        """
        print("load train stats..", end="")
        # load precalculated training set statistics
        f = np.load(self.train_stats_file)
        mu_trn, sigma_trn = f['mu'][:], f['sigma'][:]
        f.close()
        print("ok")
        z_fixed = np.random.uniform(-1, 1, size=(self.batch_size, self.z_num))
        x_fixed = self.get_image_from_loader()
        save_image(x_fixed, '{}/x_fixed.png'.format(self.model_dir))
        prev_measure = 1
        measure_history = deque([0]*self.lr_update_step, self.lr_update_step)
        # load inference model
        fid.create_inception_graph("inception-2015-12-05/classify_image_graph_def.pb")
        #query_tensor = fid.get_Fid_query_tensor(self.sess)
        if self.load_checkpoint:
            if self.load(self.model_dir):
                print(" [*] Load SUCCESS")
            else:
                print(" [!] Load failed...")
        # Precallocate prediction array for kl/fid inception score
        #print("preallocate %.3f GB for prediction array.." % (self.eval_num_samples * 2048 / (1024**3)), end=" ", flush=True)
        inception_activations = np.ones([self.eval_num_samples, 2048])
        #print("ok")
        for step in trange(self.start_step, self.max_step):
            # Optimize
            self.sess.run([self.d_optim, self.g_optim])
            # Feed dict
            fetch_dict = {"measure": self.measure}
            if self.update_k:
                fetch_dict.update({"k_update": self.k_update})
            if step % self.log_step == 0:
                fetch_dict.update({
                    "summary": self.summary_op,
                    "g_loss": self.g_loss,
                    "d_loss": self.d_loss,
                    "k_t": self.k_t,
                })
            # Get summaries
            result = self.sess.run(fetch_dict)
            measure = result['measure']
            measure_history.append(measure)
            if step % self.log_step == 0:
                self.summary_writer.add_summary(result['summary'], step)
                self.summary_writer.flush()
                g_loss = result['g_loss']
                d_loss = result['d_loss']
                k_t = result['k_t']
                print("[{}/{}] Loss_D: {:.6f} Loss_G: {:.6f} measure: {:.4f}, k_t: {:.4f}". \
                    format(step, self.max_step, d_loss, g_loss, measure, k_t))
            if step % (self.log_step * 10) == 0:
                x_fake = self.generate(z_fixed, self.model_dir, idx=step)
                self.autoencode(x_fixed, self.model_dir, idx=step, x_fake=x_fake)
            if step % self.lr_update_step == self.lr_update_step - 1:
                self.sess.run([self.g_lr_update, self.d_lr_update])
            # FID
            if step % self.eval_step == 0:
                eval_batches_num = self.eval_num_samples // self.eval_batch_size
                for eval_batch in range(eval_batches_num):
                    print("\rFID batch %d/%d" % (eval_batch + 1, eval_batches_num), end="", flush=True)
                    sample_z_eval = np.random.uniform(-1, 1, size=(self.eval_batch_size, self.z_num))
                    samples_eval = self.generate(sample_z_eval, self.model_dir, save=False)
                    activations_batch = fid.get_activations(samples_eval,
                                                            self.sess,
                                                            batch_size=self.eval_batch_size,
                                                            verbose=False)
                    frm = eval_batch * self.eval_batch_size
                    to = frm + self.eval_batch_size
                    inception_activations[frm:to,:] = activations_batch
                print()
                # calculate FID
                print("FID:", end=" ", flush=True)
                try:
                    mu_eval = np.mean(inception_activations, axis=0)
                    sigma_eval = np.cov(inception_activations, rowvar=False)
                    FID = fid.calculate_frechet_distance(mu_eval, sigma_eval, mu_trn, sigma_trn)
                except Exception as e:
                    # FID computation can fail numerically; fall back to a
                    # sentinel value so training continues.
                    print(e)
                    FID = 500
                print(FID)
                self.sess.run(tf.assign(self.fid, FID))
                summary_str = self.sess.run(self.fid_sum)
                self.summary_writer.add_summary(summary_str, step)
        #print("eval finished")
    def build_model(self):
        """Assemble the BEGAN graph: generator, autoencoding discriminator,
        losses, optimizers, the k_t balance update and summaries."""
        self.x = self.data_loader
        x = norm_img(self.x)
        self.z = tf.random_uniform(
            (tf.shape(x)[0], self.z_num), minval=-1.0, maxval=1.0)
        if self.update_k:
            self.k_t = tf.Variable(0.0, trainable=False, name='k_t')
        else:
            self.k_t = tf.constant(self.k_constant, name="k_t")
        G, self.G_var = GeneratorCNN(
            self.z, self.conv_hidden_num, self.channel,
            self.repeat_num, self.data_format, reuse=False)
        # Fake and real batches share one discriminator pass, then split.
        d_out, self.D_z, self.D_var = DiscriminatorCNN(
            tf.concat([G, x], 0), self.channel, self.z_num, self.repeat_num,
            self.conv_hidden_num, self.data_format)
        AE_G, AE_x = tf.split(d_out, 2, 0)
        self.G = denorm_img(G, self.data_format)
        self.AE_G, self.AE_x = denorm_img(AE_G, self.data_format), denorm_img(AE_x, self.data_format)
        if self.optimizer == 'adam':
            optimizer = tf.train.AdamOptimizer
        else:
            # NOTE(review): `config` is undefined in this scope -- reaching
            # this branch raises NameError instead of the intended Exception;
            # probably should reference self.optimizer.
            raise Exception("[!] Caution! Paper didn't use {} opimizer other than Adam".format(config.optimizer))
        g_optimizer, d_optimizer = optimizer(self.g_lr), optimizer(self.d_lr)
        # BEGAN losses: L1 reconstruction errors of the autoencoder.
        self.d_loss_real = tf.reduce_mean(tf.abs(AE_x - x))
        self.d_loss_fake = tf.reduce_mean(tf.abs(AE_G - G))
        self.d_loss = self.d_loss_real - self.k_t * self.d_loss_fake
        self.g_loss = tf.reduce_mean(tf.abs(AE_G - G))
        self.d_optim = d_optimizer.minimize(self.d_loss, var_list=self.D_var)
        self.g_optim = g_optimizer.minimize(self.g_loss, global_step=self.step, var_list=self.G_var)
        #grads, vrbls = zip(*d_optimizer.compute_gradients(self.d_loss, self.D_var))
        #grads, _ = tf.clip_by_global_norm(grads, self.global_norm_thres)
        #grads = [
        #    tf.clip_by_value(grad, self.clip_value_min, self.clip_value_max)
        #    for grad in grads]
        #grads = [tf.div(grad, tf.reduce_max(grad)) for grad in grads]
        #self.d_optim = d_optimizer.apply_gradients(zip(grads, vrbls))
        #grads, vrbls = zip(*g_optimizer.compute_gradients(self.g_loss, self.G_var))
        #grads, _ = tf.clip_by_global_norm(grads, self.global_norm_thres)
        #grads = [
        #    tf.clip_by_value(grad, self.clip_value_min, self.clip_value_max)
        #    for grad in grads]
        #grads = [tf.div(grad, tf.reduce_max(grad)) for grad in grads]
        #self.g_optim = g_optimizer.apply_gradients(zip(grads, vrbls), global_step=self.step)
        # Convergence measure from the BEGAN balance term.
        self.balance = self.gamma * self.d_loss_real - self.g_loss
        self.measure = self.d_loss_real + tf.abs(self.balance)
        # k update
        if self.update_k:
            self.k_update = tf.assign(self.k_t,
                tf.clip_by_value(self.k_t + self.lambda_k * self.balance, 0, 1))
        self.summary_op = tf.summary.merge([
            tf.summary.image("G", self.G),
            tf.summary.image("AE_G", self.AE_G),
            tf.summary.image("AE_x", self.AE_x),
            tf.summary.scalar("loss/d_loss", self.d_loss),
            tf.summary.scalar("loss/d_loss_real", self.d_loss_real),
            tf.summary.scalar("loss/d_loss_fake", self.d_loss_fake),
            tf.summary.scalar("loss/g_loss", self.g_loss),
            tf.summary.scalar("misc/measure", self.measure),
            tf.summary.scalar("misc/k_t", self.k_t),
            tf.summary.scalar("misc/d_lr", self.d_lr),
            tf.summary.scalar("misc/g_lr", self.g_lr),
            tf.summary.scalar("misc/balance", self.balance),
        ])
        # TTS stuff
        self.image_enc_data = tf.placeholder(tf.uint8,[self.output_height, self.output_width, 3])
        self.encode_jpeg = tf.image.encode_jpeg(self.image_enc_data)
        self.fid = tf.Variable(0.0, trainable=False)
        self.fid_sum = tf.summary.scalar("FID", self.fid)
    def build_test_model(self):
        """Add test-only ops: a trainable latent ``z_r`` regressed toward real
        images (used by interpolate_G)."""
        with tf.variable_scope("test") as vs:
            # Extra ops for interpolation
            z_optimizer = tf.train.AdamOptimizer(0.0001)
            self.z_r = tf.get_variable("z_r", [self.batch_size, self.z_num], tf.float32)
            self.z_r_update = tf.assign(self.z_r, self.z)
        G_z_r, _ = GeneratorCNN(
            self.z_r, self.conv_hidden_num, self.channel, self.repeat_num, self.data_format, reuse=True)
        with tf.variable_scope("test") as vs:
            self.z_r_loss = tf.reduce_mean(tf.abs(self.x - G_z_r))
            self.z_r_optim = z_optimizer.minimize(self.z_r_loss, var_list=[self.z_r])
        test_variables = tf.contrib.framework.get_variables(vs)
        self.sess.run(tf.variables_initializer(test_variables))
    def generate(self, inputs, root_path=None, path=None, idx=None, save=True):
        """Run the generator on latent *inputs*; optionally save the images."""
        x = self.sess.run(self.G, {self.z: inputs})
        if path is None and save:
            path = os.path.join(root_path, '{}_G.png'.format(idx))
            save_image(x, path)
            print("[*] Samples saved: {}".format(path))
        return x
    def autoencode(self, inputs, path, idx=None, x_fake=None):
        """Push real (and optionally fake) images through the discriminator's
        autoencoder and save the reconstructions."""
        items = {
            'real': inputs,
            'fake': x_fake,
        }
        for key, img in items.items():
            if img is None:
                continue
            if img.shape[3] in [1, 3]:
                img = img.transpose([0, 3, 1, 2])
            x_path = os.path.join(path, '{}_D_{}.png'.format(idx, key))
            x = self.sess.run(self.AE_x, {self.x: img})
            save_image(x, x_path)
            print("[*] Samples saved: {}".format(x_path))
    def encode(self, inputs):
        """Map images to the discriminator's latent code ``D_z``."""
        if inputs.shape[3] in [1, 3]:
            inputs = inputs.transpose([0, 3, 1, 2])
        return self.sess.run(self.D_z, {self.x: inputs})
    def decode(self, z):
        """Decode latent codes through the discriminator's autoencoder."""
        return self.sess.run(self.AE_x, {self.D_z: z})
    def interpolate_G(self, real_batch, step=0, root_path='.', train_epoch=0):
        """Regress ``z_r`` toward *real_batch*, then save slerp interpolations
        between the two halves of the recovered latent codes."""
        batch_size = len(real_batch)
        half_batch_size = int(batch_size/2)
        self.sess.run(self.z_r_update)
        tf_real_batch = to_nchw_numpy(real_batch)
        for i in trange(train_epoch):
            z_r_loss, _ = self.sess.run([self.z_r_loss, self.z_r_optim], {self.x: tf_real_batch})
        z = self.sess.run(self.z_r)
        z1, z2 = z[:half_batch_size], z[half_batch_size:]
        real1_batch, real2_batch = real_batch[:half_batch_size], real_batch[half_batch_size:]
        generated = []
        for idx, ratio in enumerate(np.linspace(0, 1, 10)):
            z = np.stack([slerp(ratio, r1, r2) for r1, r2 in zip(z1, z2)])
            z_decode = self.generate(z, save=False)
            generated.append(z_decode)
        generated = np.stack(generated).transpose([1, 0, 2, 3, 4])
        for idx, img in enumerate(generated):
            save_image(img, os.path.join(root_path, 'test{}_interp_G_{}.png'.format(step, idx)), nrow=10)
        all_img_num = np.prod(generated.shape[:2])
        batch_generated = np.reshape(generated, [all_img_num] + list(generated.shape[2:]))
        save_image(batch_generated, os.path.join(root_path, 'test{}_interp_G.png'.format(step)), nrow=10)
    def interpolate_D(self, real1_batch, real2_batch, step=0, root_path="."):
        """Save slerp interpolations in the discriminator's latent space
        between the encodings of two real batches."""
        real1_encode = self.encode(real1_batch)
        real2_encode = self.encode(real2_batch)
        decodes = []
        for idx, ratio in enumerate(np.linspace(0, 1, 10)):
            z = np.stack([slerp(ratio, r1, r2) for r1, r2 in zip(real1_encode, real2_encode)])
            z_decode = self.decode(z)
            decodes.append(z_decode)
        decodes = np.stack(decodes).transpose([1, 0, 2, 3, 4])
        for idx, img in enumerate(decodes):
            img = np.concatenate([[real1_batch[idx]], img, [real2_batch[idx]]], 0)
            save_image(img, os.path.join(root_path, 'test{}_interp_D_{}.png'.format(step, idx)), nrow=10 + 2)
    def test(self):
        """Produce qualitative outputs: reconstructions, latent interpolations
        and fixed-z samples for three loader batches."""
        root_path = "./"#self.model_dir
        all_G_z = None
        for step in range(3):
            real1_batch = self.get_image_from_loader()
            real2_batch = self.get_image_from_loader()
            save_image(real1_batch, os.path.join(root_path, 'test{}_real1.png'.format(step)))
            save_image(real2_batch, os.path.join(root_path, 'test{}_real2.png'.format(step)))
            self.autoencode(
                real1_batch, self.model_dir, idx=os.path.join(root_path, "test{}_real1".format(step)))
            self.autoencode(
                real2_batch, self.model_dir, idx=os.path.join(root_path, "test{}_real2".format(step)))
            self.interpolate_G(real1_batch, step, root_path)
            #self.interpolate_D(real1_batch, real2_batch, step, root_path)
            z_fixed = np.random.uniform(-1, 1, size=(self.batch_size, self.z_num))
            G_z = self.generate(z_fixed, path=os.path.join(root_path, "test{}_G_z.png".format(step)))
            if all_G_z is None:
                all_G_z = G_z
            else:
                all_G_z = np.concatenate([all_G_z, G_z])
            save_image(all_G_z, '{}/G_z{}.png'.format(root_path, step))
        save_image(all_G_z, '{}/all_G_z.png'.format(root_path), nrow=16)
    def get_image_from_loader(self):
        """Pull one batch of images from the data loader as NHWC numpy."""
        x = self.data_loader.eval(session=self.sess)
        if self.data_format == 'NCHW':
            x = x.transpose([0, 2, 3, 1])
        return x
    # Load checkpoint
    def load(self, checkpoint_dir):
        """Restore the newest checkpoint in *checkpoint_dir*; return success."""
        print(" [*] Reading checkpoints from %s..." % checkpoint_dir)
        #checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
            print(" [*] Success to read {}".format(ckpt_name))
            return True
        else:
            print(" [*] Failed to find a checkpoint")
            return False
|
|
#reading different model files
import numpy as np
from numpy import recfromtxt, genfromtxt
import pandas as pd
from astropy import units as u
import logging
# Adding logging support
logger = logging.getLogger(__name__)
from tardis.util import parse_quantity
class ConfigurationError(Exception):
    """Raised when a model file / boundary configuration is invalid."""
    pass
def read_density_file(density_filename, density_filetype, time_explosion, v_inner_boundary=0.0, v_outer_boundary=np.inf):
    """
    Read different density file formats and rescale them to ``time_explosion``.

    Parameters
    ----------
    density_filename: ~str
        filename or path of the density file
    density_filetype: ~str
        type of the density file ('artis' or 'simple_ascii')
    time_explosion: ~astropy.units.Quantity
        time since explosion used to scale the density
    v_inner_boundary:
        requested inner boundary velocity; shells inside it are dropped
        (default 0.0 keeps the innermost shell)
    v_outer_boundary:
        requested outer boundary velocity; shells outside it are dropped
        (default np.inf keeps the outermost shell)

    Returns
    -------
    v_inner, v_outer, mean_densities, inner_boundary_index, outer_boundary_index
    """
    file_parsers = {'artis': read_artis_density,
                    'simple_ascii': read_simple_ascii_density}
    time_of_model, index, v_inner, v_outer, unscaled_mean_densities = file_parsers[density_filetype](density_filename)
    # Homologous expansion: rescale densities from the file epoch to
    # time_explosion.
    mean_densities = calculate_density_after_time(unscaled_mean_densities, time_of_model, time_explosion)
    if v_inner_boundary > v_outer_boundary:
        # BUG FIX: the old message used the '{:s}' format spec, which raises a
        # ValueError for non-string (float / Quantity) boundary values on
        # Python 3; plain '{}' formats any type.
        raise ConfigurationError('v_inner_boundary > v_outer_boundary '
                                 '({0} > {1}). unphysical!'.format(
                                     v_inner_boundary, v_outer_boundary))
    # NOTE(review): this comparison mixes a possibly-plain-float boundary with
    # Quantity shell velocities -- callers appear expected to pass Quantities
    # when overriding the defaults; confirm against call sites.
    if (not np.isclose(v_inner_boundary, 0.0 * u.km / u.s,
                       atol=1e-8 * u.km / u.s)
            and v_inner_boundary > v_inner[0]):
        if v_inner_boundary > v_outer[-1]:
            raise ConfigurationError('Inner boundary selected outside of model')
        # Index of the shell containing the requested inner boundary.
        inner_boundary_index = v_inner.searchsorted(v_inner_boundary) - 1
    else:
        inner_boundary_index = None
        v_inner_boundary = v_inner[0]
        logger.warning("v_inner_boundary requested too small for readin file."
                       " Boundary shifted to match file.")
    if not np.isinf(v_outer_boundary) and v_outer_boundary < v_outer[-1]:
        outer_boundary_index = v_outer.searchsorted(v_outer_boundary) + 1
    else:
        outer_boundary_index = None
        v_outer_boundary = v_outer[-1]
        logger.warning("v_outer_boundary requested too large for readin file. Boundary shifted to match file.")
    # Trim all arrays to the selected shell window and pin the boundary
    # velocities exactly to the requested values.
    v_inner = v_inner[inner_boundary_index:outer_boundary_index]
    v_inner[0] = v_inner_boundary
    v_outer = v_outer[inner_boundary_index:outer_boundary_index]
    v_outer[-1] = v_outer_boundary
    mean_densities = mean_densities[inner_boundary_index:outer_boundary_index]
    return v_inner, v_outer, mean_densities, inner_boundary_index, outer_boundary_index
def read_abundances_file(abundance_filename, abundance_filetype, inner_boundary_index=None, outer_boundary_index=None):
    """
    Read different abundance file formats and trim them to the shell window.

    Parameters
    ----------
    abundance_filename: ~str
        filename or path of the abundance file
    abundance_filetype: ~str
        type of the abundance file
    inner_boundary_index: int
        index of the inner shell, default None
    outer_boundary_index: int
        index of the outer shell, default None

    Returns
    -------
    index: shell indices remaining after trimming
    abundances: ~pandas.DataFrame with columns renumbered from 0
    """
    file_parsers = {'simple_ascii': read_simple_ascii_abundances,
                    'artis': read_simple_ascii_abundances}
    index, abundances = file_parsers[abundance_filetype](abundance_filename)
    if outer_boundary_index is not None:
        # Column labels start at 1 and label-based slicing is inclusive, so
        # the exclusive outer index must be decremented by one.
        outer_boundary_index_m1 = outer_boundary_index - 1
    else:
        outer_boundary_index_m1 = None
    index = index[inner_boundary_index:outer_boundary_index]
    # BUG FIX: DataFrame.ix was deprecated and removed from pandas.  The
    # columns carry integer labels, so .loc reproduces ix's label-based,
    # inclusive slicing here.
    # NOTE(review): ix also had a positional fallback for missing labels;
    # confirm callers never pass an inner index whose label is absent.
    abundances = abundances.loc[:, slice(inner_boundary_index, outer_boundary_index_m1)]
    abundances.columns = np.arange(len(abundances.columns))
    return index, abundances
def read_simple_ascii_density(fname):
    """
    Reading a density file of the following structure (example; lines starting with a hash will be ignored):
    The first density describes the mean density in the center of the model and is not used.
    5 s
    #index velocity [km/s] density [g/cm^3]
    0 1.1e4 1.6e8
    1 1.2e4 1.7e8

    Parameters
    ----------
    fname: str
        filename or path with filename

    Returns
    -------
    time_of_model: ~astropy.units.Quantity
        time at which the model is valid
    index: shell indices from the file
    v_inner, v_outer: inner/outer shell velocities in cm/s
    mean_density: shell densities (central value dropped)
    """
    with open(fname) as fh:
        # First line holds the model epoch, e.g. "5 s".
        time_of_model_string = fh.readline().strip()
        time_of_model = parse_quantity(time_of_model_string)
    # BUG FIX: np.recfromtxt is deprecated and removed in NumPy 2.0;
    # genfromtxt with a structured dtype yields the same named access.
    data = genfromtxt(fname, skip_header=1,
                      dtype=[('index', int), ('velocity', float), ('density', float)])
    velocity = (data['velocity'] * u.km / u.s).to('cm/s')
    # Consecutive velocities delimit the shells.
    v_inner, v_outer = velocity[:-1], velocity[1:]
    # The first density is the (unused) mean density at the centre.
    mean_density = (data['density'] * u.Unit('g/cm^3'))[1:]
    return time_of_model, data['index'], v_inner, v_outer, mean_density
def read_artis_density(fname):
    """
    Reading a density file of the following structure (example; lines starting with a hash will be ignored):
    The first density describes the mean density in the center of the model and is not used.
    5
    #index velocity [km/s] log10(density) [log10(g/cm^3)]
    0 1.1e4 1.6e8
    1 1.2e4 1.7e8

    Parameters
    ----------
    fname: str
        filename or path with filename

    Returns
    -------
    time_of_model: ~astropy.units.Quantity
        time at which the model is valid
    index: shell indices from the file
    v_inner, v_outer: inner/outer shell velocities in cm/s
    mean_density: shell densities (log10 values exponentiated)
    """
    with open(fname) as fh:
        # BUG FIX: the old code iterated ``file(fname)`` -- the Python 2
        # builtin ``file`` does not exist on Python 3 and needlessly reopened
        # the file; iterate the already-open handle instead.
        for i, line in enumerate(fh):
            if i == 0:
                # Shell count from the header (kept for documentation).
                no_of_shells = np.int64(line.strip())
            elif i == 1:
                time_of_model = u.Quantity(float(line.strip()), 'day').to('s')
            elif i == 2:
                break
    artis_model_columns = ['index', 'velocities', 'mean_densities_0', 'ni56_fraction', 'co56_fraction', 'fe52_fraction',
                           'cr48_fraction']
    # BUG FIX: np.recfromtxt is deprecated and removed in NumPy 2.0; the
    # ``unpack`` flag was dropped -- transposing a 1-D structured array is a
    # no-op anyway.
    artis_model = genfromtxt(fname, skip_header=2, usecols=(0, 1, 2, 4, 5, 6, 7),
                             dtype=[(item, np.float64) for item in artis_model_columns])
    velocity = u.Quantity(artis_model['velocities'], 'km/s').to('cm/s')
    # File stores log10 densities.
    mean_density = u.Quantity(10 ** artis_model['mean_densities_0'], 'g/cm^3')
    v_inner, v_outer = velocity[:-1], velocity[1:]
    return time_of_model, artis_model['index'], v_inner, v_outer, mean_density
def read_simple_ascii_abundances(fname):
    """
    Parse a plain-text abundance table (lines starting with a hash are
    ignored).  The first row describes the abundances at the centre of the
    model and is discarded.

    Parameters
    ----------
    fname: str
        filename or path with filename

    Returns
    -------
    index: ~np.ndarray
        containing the shell indices
    abundances: ~pandas.DataFrame
        one row per element (index 1..n), one column per shell
    """
    raw = np.loadtxt(fname)
    # Column 0 is the shell index; row 0 (the centre) is dropped everywhere.
    shell_index = raw[1:, 0].astype(int)
    element_count = raw.shape[1]
    abundances = pd.DataFrame(raw[1:, 1:].transpose(),
                              index=np.arange(1, element_count))
    return shell_index, abundances
def calculate_density_after_time(densities, time_0, time_explosion):
    """
    Scale densities from the model epoch to the explosion epoch.

    Homologous expansion dilutes density as (t / t_0) ** -3.

    Parameters:
    -----------
    densities: ~astropy.units.Quantity
        densities at ``time_0``
    time_0: ~astropy.units.Quantity
        time at which the model densities are defined
    time_explosion: ~astropy.units.Quantity
        time the densities are scaled to

    Returns:
    --------
    scaled_density
    """
    dilution = (time_explosion / time_0) ** -3
    return densities * dilution
|
|
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import transaction
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden, JsonResponse
from django.utils.decorators import decorator_from_middleware
from django.views.decorators.http import require_http_methods
from .exceptions import BadRequest, Unauthorized, Forbidden, NotFound, Conflict, PreconditionFail, OauthUnauthorized, OauthBadRequest
from .utils import req_validate, req_parse, req_process, XAPIVersionHeaderMiddleware, XAPIConsistentThroughMiddleware
# This uses the lrs logger for LRS specific information
logger = logging.getLogger(__name__)
@require_http_methods(["GET", "HEAD"])
def about(request):
    """Return the LRS 'about' resource.

    Responds with the supported xAPI versions plus a machine-readable
    description (name, methods, endpoint URL, description) of every endpoint
    this server exposes.
    """
    lrs_data = {
        "version": settings.XAPI_VERSIONS,
        "extensions": {
            "xapi": {
                "statements":
                {
                    "name": "Statements",
                    "methods": ["GET", "POST", "PUT", "HEAD"],
                    "endpoint": reverse('lrs:statements'),
                    "description": "Endpoint to submit and retrieve XAPI statements.",
                },
                "activities":
                {
                    "name": "Activities",
                    "methods": ["GET", "HEAD"],
                    "endpoint": reverse('lrs:activities'),
                    "description": "Endpoint to retrieve a complete activity object.",
                },
                "activities_state":
                {
                    "name": "Activities State",
                    "methods": ["PUT", "POST", "GET", "DELETE", "HEAD"],
                    "endpoint": reverse('lrs:activity_state'),
                    "description": "Stores, fetches, or deletes the document specified by the given stateId that exists in the context of the specified activity, agent, and registration (if specified).",
                },
                "activities_profile":
                {
                    "name": "Activities Profile",
                    "methods": ["PUT", "POST", "GET", "DELETE", "HEAD"],
                    "endpoint": reverse('lrs:activity_profile'),
                    "description": "Saves/retrieves/deletes the specified profile document in the context of the specified activity.",
                },
                "agents":
                {
                    "name": "Agents",
                    "methods": ["GET", "HEAD"],
                    "endpoint": reverse('lrs:agents'),
                    "description": "Returns a special, Person object for a specified agent.",
                },
                "agents_profile":
                {
                    "name": "Agent Profile",
                    "methods": ["PUT", "POST", "GET", "DELETE", "HEAD"],
                    "endpoint": reverse('lrs:agent_profile'),
                    "description": "Saves/retrieves/deletes the specified profile document in the context of the specified agent.",
                }
            }
        }
    }
    return JsonResponse(lrs_data)
@require_http_methods(["GET", "HEAD"])
@decorator_from_middleware(XAPIVersionHeaderMiddleware.XAPIVersionHeader)
def statements_more(request, more_id):
    # Paged continuation of an earlier statements query; more_id picks the page.
    return handle_request(request, more_id)


@require_http_methods(["GET", "HEAD"])
@decorator_from_middleware(XAPIVersionHeaderMiddleware.XAPIVersionHeader)
def statements_more_placeholder(request):
    # Always rejected when hit directly; its route is used via reverse()
    # inside handle_request to key the 'more' validators/processors.
    return HttpResponseForbidden("Forbidden")


@require_http_methods(["PUT", "GET", "POST", "HEAD"])
@decorator_from_middleware(XAPIConsistentThroughMiddleware.XAPIConsistentThrough)
@decorator_from_middleware(XAPIVersionHeaderMiddleware.XAPIVersionHeader)
def statements(request):
    # Statement submission/retrieval; dispatch happens in handle_request.
    return handle_request(request)


@require_http_methods(["PUT", "POST", "GET", "DELETE", "HEAD"])
@decorator_from_middleware(XAPIVersionHeaderMiddleware.XAPIVersionHeader)
def activity_state(request):
    # Activity-state document CRUD.
    return handle_request(request)


@require_http_methods(["PUT", "POST", "GET", "DELETE", "HEAD"])
@decorator_from_middleware(XAPIVersionHeaderMiddleware.XAPIVersionHeader)
def activity_profile(request):
    # Activity-profile document CRUD.
    return handle_request(request)


@require_http_methods(["GET", "HEAD"])
@decorator_from_middleware(XAPIVersionHeaderMiddleware.XAPIVersionHeader)
def activities(request):
    # Full activity object retrieval.
    return handle_request(request)


@require_http_methods(["PUT", "POST", "GET", "DELETE", "HEAD"])
@decorator_from_middleware(XAPIVersionHeaderMiddleware.XAPIVersionHeader)
def agent_profile(request):
    # Agent-profile document CRUD.
    return handle_request(request)


@require_http_methods(["GET", "HEAD"])
@decorator_from_middleware(XAPIVersionHeaderMiddleware.XAPIVersionHeader)
def agents(request):
    # Person object retrieval for an agent.
    return handle_request(request)
@transaction.atomic
def handle_request(request, more_id=None):
    """Central dispatcher for every LRS endpoint.

    Parses the request, looks up the validator and processor for the
    (path, HTTP method) pair, and maps the package's exception hierarchy onto
    the appropriate HTTP status codes.  Runs inside one DB transaction.
    """
    # NOTE(review): both tables are rebuilt per request because reverse()
    # needs the URLconf loaded; they could be cached lazily if profiling
    # shows this matters.
    validators = {
        reverse('lrs:statements').lower(): {
            "POST": req_validate.statements_post,
            "GET": req_validate.statements_get,
            "PUT": req_validate.statements_put,
            "HEAD": req_validate.statements_get
        },
        reverse('lrs:statements_more_placeholder').lower(): {
            "GET": req_validate.statements_more_get,
            "HEAD": req_validate.statements_more_get
        },
        reverse('lrs:activity_state').lower(): {
            "POST": req_validate.activity_state_post,
            "PUT": req_validate.activity_state_put,
            "GET": req_validate.activity_state_get,
            "HEAD": req_validate.activity_state_get,
            "DELETE": req_validate.activity_state_delete
        },
        reverse('lrs:activity_profile').lower(): {
            "POST": req_validate.activity_profile_post,
            "PUT": req_validate.activity_profile_put,
            "GET": req_validate.activity_profile_get,
            "HEAD": req_validate.activity_profile_get,
            "DELETE": req_validate.activity_profile_delete
        },
        reverse('lrs:activities').lower(): {
            "GET": req_validate.activities_get,
            "HEAD": req_validate.activities_get
        },
        reverse('lrs:agent_profile').lower(): {
            "POST": req_validate.agent_profile_post,
            "PUT": req_validate.agent_profile_put,
            "GET": req_validate.agent_profile_get,
            "HEAD": req_validate.agent_profile_get,
            "DELETE": req_validate.agent_profile_delete
        },
        reverse('lrs:agents').lower(): {
            "GET": req_validate.agents_get,
            "HEAD": req_validate.agents_get
        }
    }
    processors = {
        reverse('lrs:statements').lower(): {
            "POST": req_process.statements_post,
            "GET": req_process.statements_get,
            "HEAD": req_process.statements_get,
            "PUT": req_process.statements_put
        },
        reverse('lrs:statements_more_placeholder').lower(): {
            "GET": req_process.statements_more_get,
            "HEAD": req_process.statements_more_get
        },
        reverse('lrs:activity_state').lower(): {
            "POST": req_process.activity_state_post,
            "PUT": req_process.activity_state_put,
            "GET": req_process.activity_state_get,
            "HEAD": req_process.activity_state_get,
            "DELETE": req_process.activity_state_delete
        },
        reverse('lrs:activity_profile').lower(): {
            "POST": req_process.activity_profile_post,
            "PUT": req_process.activity_profile_put,
            "GET": req_process.activity_profile_get,
            "HEAD": req_process.activity_profile_get,
            "DELETE": req_process.activity_profile_delete
        },
        reverse('lrs:activities').lower(): {
            "GET": req_process.activities_get,
            "HEAD": req_process.activities_get
        },
        reverse('lrs:agent_profile').lower(): {
            "POST": req_process.agent_profile_post,
            "PUT": req_process.agent_profile_put,
            "GET": req_process.agent_profile_get,
            "HEAD": req_process.agent_profile_get,
            "DELETE": req_process.agent_profile_delete
        },
        reverse('lrs:agents').lower(): {
            "GET": req_process.agents_get,
            "HEAD": req_process.agents_get
        }
    }
    try:
        r_dict = req_parse.parse(request, more_id)
        path = request.path.lower()
        if path.endswith('/'):
            path = path.rstrip('/')
        # Cutoff more_id
        if 'more' in path:
            path = "%s/%s" % (reverse('lrs:statements').lower(), "more")
        req_dict = validators[path][r_dict['method']](r_dict)
        return processors[path][req_dict['method']](req_dict)
    # BUG FIX: HttpResponseBadRequest was removed from this except tuple --
    # it is a response class, not an exception, and a non-BaseException class
    # in an except clause raises TypeError at exception-match time.
    except (BadRequest, OauthBadRequest) as err:
        status = 400
        log_exception(status, request.path)
        # BUG FIX (here and below): exceptions lost the .message attribute in
        # Python 3; str(exc) yields the same text portably.
        response = HttpResponse(str(err), status=status)
    except (Unauthorized, OauthUnauthorized) as autherr:
        status = 401
        log_exception(status, request.path)
        response = HttpResponse(str(autherr), status=status)
        response['WWW-Authenticate'] = 'Basic realm="ADLLRS"'
    except Forbidden as forb:
        status = 403
        log_exception(status, request.path)
        response = HttpResponse(str(forb), status=status)
    except NotFound as nf:
        status = 404
        log_exception(status, request.path)
        response = HttpResponse(str(nf), status=status)
    except Conflict as c:
        status = 409
        log_exception(status, request.path)
        response = HttpResponse(str(c), status=status)
    except PreconditionFail as pf:
        status = 412
        log_exception(status, request.path)
        response = HttpResponse(str(pf), status=status)
    except Exception as err:
        # Last-resort handler: anything unexpected becomes a logged 500.
        status = 500
        log_exception(status, request.path)
        response = HttpResponse(str(err), status=status)
    return response
def log_exception(status, path):
    """Log the active exception together with the HTTP status and request path."""
    logger.exception("%s === %s" % (status, path))
|
|
import string
import re
import os
from writer import *
from tmsim import *
# Interprets a TMD program on the fly. Prints out the behaviors of the
# different tapes as it goes.
# Use this to debug your handwritten TMD program before compiling it
# to a Turing machine!
def pront(x):
    # Tiny output helper (Python 2 print statement) so all interpreter output
    # flows through one place and can be silenced or redirected easily.
    print x
class TuringMachineWithStack:
    """Interpreter state for a TMD program: a call stack of
    (function name, variable->tape mapping, return line) frames plus the
    tapes themselves.  Python 2 code (uses the ``string`` module functions)."""

    def __init__(self, functions, path, functionLabelDictionary, functionLineDictionary, functionVariableDictionary, lineNumberToTextNumber, initVarString):
        # The first entry of the functions file names the main function;
        # execution starts there at line 1.
        mainFunctionName = string.strip(functions[0])
        self.stack = [(mainFunctionName, functionVariableDictionary[mainFunctionName], 1)]
        self.lineNumber = 1
        self.path = path
        self.functions = functions
        self.functionLabelDictionary = functionLabelDictionary
        self.functionLineDictionary = functionLineDictionary
        self.functionVariableDictionary = functionVariableDictionary
        self.lineNumberToTextNumber = lineNumberToTextNumber
        # Tape numbers start at 1; one tape per variable of main.
        self.tapeDictionary = {}
        firstLineOfMainFunction = open(path + string.strip(functions[0]) + ".tmd", "r").readlines()[0]
        for i, varName in enumerate(string.split(firstLineOfMainFunction)[1:]):
            tape = Tape(varName, "_")
            self.tapeDictionary[i+1] = tape
            # Seed the tape with the contents of the initvar file.
            # NOTE(review): this seeds EVERY variable tape with the same
            # string; for the common single-variable main that is simply the
            # program input -- confirm intent for multi-variable mains.
            for j, symbol in enumerate(initVarString):
                self.tapeDictionary[i+1].tapeDict[j] = symbol

    # Returns 1 upon halting, 0 otherwise
    def runOneStep(self):
        currentFunction = self.stack[-1][0]
        currentLine = self.functionLineDictionary[currentFunction][self.lineNumber]
        # get rid of labels
        currentLine = string.split(currentLine, ":")[-1]
        if "[" in currentLine:
            # direct command: "[var] (sym, actions); (sym, actions); ..."
            splitLine = re.split("[\[|\]]", currentLine)
            variableName = splitLine[1]
            reactions = string.split(splitLine[2], ";")
            foundAppropriateReaction = False
            # Resolve the variable through the current frame's tape mapping.
            tapeName = self.stack[-1][1][variableName]
            currentTape = self.tapeDictionary[tapeName]
            currentSymbol = currentTape.readSymbol()
            for reaction in reactions:
                # the ugly stuff at the end of the next line removes whitespace
                splitReaction = re.split("[(|)|,]", string.strip(reaction).replace(" ", ""))
                if splitReaction[0] == currentSymbol:
                    foundAppropriateReaction = True
                    # Writes first ("1", "E", "_"), ...
                    for command in splitReaction[1:]:
                        if command in ["1", "E", "_"]:
                            currentTape.writeSymbol(command)
                    # ... then head moves ("L", "R", "-"), ...
                    for command in splitReaction[1:]:
                        if command in ["L", "R", "-"]:
                            currentTape.moveHead(command)
                    # ... then an optional goto label.
                    foundGoto = False
                    for command in splitReaction[1:]:
                        if not command in ["1", "E", "_", "L", "R", "-", ""]:
                            try:
                                self.lineNumber = self.functionLabelDictionary[currentFunction][command]
                                foundGoto = True
                            except:
                                raise Exception("Unrecognized label on line " + str(self.lineNumberToTextNumber[self.lineNumber]) + " of function " + currentFunction)
                    if not foundGoto:
                        self.lineNumber += 1
            if not foundAppropriateReaction:
                raise Exception("Turing machine threw error on line " + str(self.lineNumberToTextNumber[self.lineNumber]) + " of function " + currentFunction)
                # NOTE(review): unreachable -- the raise above always fires.
                return 1
        elif string.split(currentLine)[0] == "function":
            # Function call: push a new frame with the callee's variables
            # mapped onto the caller's argument tapes.
            oldMappingDict = self.stack[-1][1]
            calledFunction = string.split(currentLine)[1]
            firstLineOfCalledFunctionSplit = string.split(open(self.path + calledFunction + ".tmd", "r").readlines()[0])
            argList = string.split(currentLine)[2:]
            try:
                assert len(firstLineOfCalledFunctionSplit[1:]) == len(argList)
            except:
                raise Exception("Function call on line " + str(self.lineNumberToTextNumber[self.lineNumber]) + " of function " + currentFunction + " has " + str(len(argList)) + \
                    " arguments, but the function being called has " + str(len(firstLineOfCalledFunctionSplit[1:])) + " inputs.")
            newMappingDict = {}
            for i, variableName in enumerate(firstLineOfCalledFunctionSplit[1:]):
                newMappingDict[variableName] = oldMappingDict[argList[i]]
            # Remember where to resume in the caller (lineNumber + 1).
            self.stack.append((calledFunction, newMappingDict, self.lineNumber + 1))
            self.lineNumber = 1
        elif string.split(currentLine)[0] == "return":
            # Pop the frame; an empty stack means the program halted.
            oldLineNumber = self.lineNumber
            self.lineNumber = self.stack[-1][2]
            self.stack.pop()
            if len(self.stack) == 0:
                pront("Turing machine halted on line " + str(self.lineNumberToTextNumber[oldLineNumber]) + " of function " + currentFunction)
                return 1
        else:
            raise Exception("Line " + str(self.lineNumberToTextNumber[self.lineNumber]) + " of function " + currentFunction + " is malformed.")
        return 0

    def run(self, quiet=False, numSteps=float("Inf"), outputFile=None):
        # Step until the machine halts or the step budget is exhausted,
        # optionally dumping the full machine state after every step.
        stepCounter = 0
        while self.runOneStep() == 0:
            stepCounter += 1
            if stepCounter >= numSteps:
                pront("Turing machine ran for " + str(numSteps) + " steps without halting.")
                break
            if not quiet:
                self.printAllTapes(-2, 160, outputFile)

    def printAllTapes(self, start, end, output):
        # Render current function, line, every tape in [start, end), and the
        # call stack; write to `output` or stdout when output is None.
        outString = "Function: " + self.stack[-1][0] + "\n"
        outString += "Line number: " + str(self.lineNumberToTextNumber[self.lineNumber]) + "\n"
        outString += "\n"
        for tape in self.tapeDictionary.values():
            outString += tape.getTapeOutput(start, end)
        outString += "\n"
        outString += "Stack:\n"
        outString += "\n"
        for tupIndex in range(len(self.stack) - 1, -1, -1):
            outString += "Function " + self.stack[tupIndex][0] + " with return address " + \
                str(self.lineNumberToTextNumber[self.stack[tupIndex][2]]) + " and with mapping " + str(self.stack[tupIndex][1]) + "\n"
        outString += "\n"
        outString += "---------------------------------------"
        if output == None:
            pront(outString)
        else:
            output.write(outString + "\n")
def main():
    # Parse the command line, load the TMD program from ../tmd_dirs/<dir>/,
    # and run it through the on-the-fly interpreter.
    # NOTE(review): ``sys`` and the helper functions are not imported in this
    # file directly -- presumably supplied by ``from tmsim import *`` /
    # ``from writer import *``; confirm.
    dirName = sys.argv[-1]
    path = "../tmd_dirs/" + dirName + "/"
    try:
        # Every flag between the script name and the directory must be a
        # known switch or an integer (the -s step count).
        assert len(sys.argv) > 1
        for flag in sys.argv[2:-1]:
            if not (flag in ["-q", "-s", "-f"]):
                int(flag)
    except:
        raise Exception("Usage: python tmd_interpreter [-q] [-s] [# steps before aborting] [-f] [name of TMD directory]\n \
            Enable -q if you want no program output\n \
            Enable -s followed by the max number of steps if you want to stop interpreting after a certain number of commands\n \
            Enable -f if you want to dump the history into a file in tmd_histories instead of the standard output.")
    try:
        assert os.path.exists(path)
    except:
        raise Exception("Directory " + path + " does not exist.")
    try:
        functions = [string.strip(x) for x in open(path + "functions", "r").readlines()]
    except:
        raise Exception("No functions file found in directory " + path)
    functionLabelDictionary, functionDictionary, functionLineDictionary, lineNumberToTextNumber = getFunctionLabelDictionary(functions, path)
    functionVariableDictionary = getFunctionVariableDictionary(functions, path)
    try:
        initVarString = string.strip(open(path + "initvar", "r").read())
    except:
        raise Exception("No initvar file found in directory " + path)
    # -q: suppress per-step tape dumps.
    if "-q" in sys.argv:
        quiet = True
    else:
        quiet = False
    # -s N: abort after N steps instead of running to halt.
    if "-s" in sys.argv:
        numSteps = int(sys.argv[sys.argv.index("-s") + 1])
    else:
        numSteps = float("inf")
    HISTORY_PATH = "../tmd_histories/"
    # -f: dump the run history to a file (requires -s so the file is bounded).
    if "-f" in sys.argv:
        outputFile = open(HISTORY_PATH + dirName + "_tmd_history.txt", "w")
        outputFile.write("\n")
        try:
            assert "-s" in sys.argv
        except:
            raise Exception("You can't include the -f flag without also specifying a maximum step count with the -s flag!")
    else:
        outputFile = None
    TuringMachineWithStack(functions, path, functionLabelDictionary, functionLineDictionary,
        functionVariableDictionary, lineNumberToTextNumber, initVarString).run(quiet, numSteps, outputFile)
# Script entry point.
if __name__ == "__main__":
    main()
|
|
# -*- coding: utf-8 -*-
import re
import numpy as np
import pytest
from pandas.compat import lrange, range
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
import pandas as pd
from pandas import IntervalIndex, MultiIndex, RangeIndex
import pandas.util.testing as tm
def test_labels_dtypes():
    # GH 8456: level codes are stored with the smallest sufficient int dtype.
    mi = MultiIndex.from_tuples([('A', 1), ('A', 2)])
    assert mi.codes[0].dtype == 'int8'
    assert mi.codes[1].dtype == 'int8'

    # The codes dtype widens as the number of level values grows.
    for size, expected_dtype in ((40, 'int8'), (400, 'int16'), (40000, 'int32')):
        mi = MultiIndex.from_product([['a'], range(size)])
        assert mi.codes[1].dtype == expected_dtype

    # Codes for present values are never negative.
    mi = pd.MultiIndex.from_product([['a'], range(1000)])
    assert (mi.codes[0] >= 0).all()
    assert (mi.codes[1] >= 0).all()
def test_values_boxed():
    # MI.values must box tuples exactly like building the object array directly.
    tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
              (3, pd.Timestamp('2000-01-03')),
              (1, pd.Timestamp('2000-01-04')),
              (2, pd.Timestamp('2000-01-02')),
              (3, pd.Timestamp('2000-01-03'))]
    result = pd.MultiIndex.from_tuples(tuples)
    expected = construct_1d_object_array_from_listlike(tuples)
    tm.assert_numpy_array_equal(result.values, expected)
    # Check that code branches for boxed values produce identical results
    tm.assert_numpy_array_equal(result.values[:4], result[:4].values)


def test_values_multiindex_datetimeindex():
    # Test to ensure we hit the boxing / nobox part of MI.values
    # (tz-naive vs tz-aware datetimes exercise both paths).
    ints = np.arange(10 ** 18, 10 ** 18 + 5)
    naive = pd.DatetimeIndex(ints)
    # TODO(GH-24559): Remove the FutureWarning
    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        aware = pd.DatetimeIndex(ints, tz='US/Central')
    idx = pd.MultiIndex.from_arrays([naive, aware])
    result = idx.values
    # Round-tripping each level out of .values must reproduce the inputs.
    outer = pd.DatetimeIndex([x[0] for x in result])
    tm.assert_index_equal(outer, naive)
    inner = pd.DatetimeIndex([x[1] for x in result])
    tm.assert_index_equal(inner, aware)
    # n_lev > n_lab
    result = idx[:2].values
    outer = pd.DatetimeIndex([x[0] for x in result])
    tm.assert_index_equal(outer, naive[:2])
    inner = pd.DatetimeIndex([x[1] for x in result])
    tm.assert_index_equal(inner, aware[:2])


def test_values_multiindex_periodindex():
    # Test to ensure we hit the boxing / nobox part of MI.values
    # (plain ints vs Periods exercise both paths).
    ints = np.arange(2007, 2012)
    pidx = pd.PeriodIndex(ints, freq='D')
    idx = pd.MultiIndex.from_arrays([ints, pidx])
    result = idx.values
    outer = pd.Int64Index([x[0] for x in result])
    tm.assert_index_equal(outer, pd.Int64Index(ints))
    inner = pd.PeriodIndex([x[1] for x in result])
    tm.assert_index_equal(inner, pidx)
    # n_lev > n_lab
    result = idx[:2].values
    outer = pd.Int64Index([x[0] for x in result])
    tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
    inner = pd.PeriodIndex([x[1] for x in result])
    tm.assert_index_equal(inner, pidx[:2])
def test_consistency():
    # need to construct an overflow
    major_axis = lrange(70000)
    minor_axis = lrange(10)
    major_codes = np.arange(70000)
    minor_codes = np.repeat(lrange(10), 7000)
    # the fact that is works means it's consistent
    index = MultiIndex(levels=[major_axis, minor_axis],
                       codes=[major_codes, minor_codes])
    # inconsistent: duplicate (major, minor) pairs must flag non-uniqueness
    major_codes = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
    minor_codes = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
    index = MultiIndex(levels=[major_axis, minor_axis],
                       codes=[major_codes, minor_codes])
    assert index.is_unique is False


def test_hash_collisions():
    # non-smoke test that we don't get hash collisions
    index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
                                    names=['one', 'two'])
    result = index.get_indexer(index.values)
    tm.assert_numpy_array_equal(result, np.arange(
        len(index), dtype='intp'))
    # spot-check lookups at both ends of the index
    for i in [0, 1, len(index) - 2, len(index) - 1]:
        result = index.get_loc(index[i])
        assert result == i


def test_dims():
    # Placeholder -- no dimension checks implemented yet.
    pass
def take_invalid_kwargs():
    # NOTE(review): the name lacks the test_ prefix, so pytest does not
    # collect this function -- confirm whether that is intentional.
    vals = [['A', 'B'],
            [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
    idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
    indices = [1, 2]

    # unknown keyword is rejected
    msg = r"take\(\) got an unexpected keyword argument 'foo'"
    with pytest.raises(TypeError, match=msg):
        idx.take(indices, foo=2)

    # numpy-compat keywords exist but are unsupported
    msg = "the 'out' parameter is not supported"
    with pytest.raises(ValueError, match=msg):
        idx.take(indices, out=indices)

    msg = "the 'mode' parameter is not supported"
    with pytest.raises(ValueError, match=msg):
        idx.take(indices, mode='clip')


def test_isna_behavior(idx):
    # should not segfault GH5123
    # NOTE: if MI representation changes, may make sense to allow
    # isna(MI)
    with pytest.raises(NotImplementedError):
        pd.isna(idx)


def test_large_multiindex_error():
    # GH12527: missing keys must raise KeyError on either side of the
    # 1,000,000-row engine cutover.
    df_below_1000000 = pd.DataFrame(
        1, index=pd.MultiIndex.from_product([[1, 2], range(499999)]),
        columns=['dest'])
    with pytest.raises(KeyError):
        df_below_1000000.loc[(-1, 0), 'dest']
    with pytest.raises(KeyError):
        df_below_1000000.loc[(3, 0), 'dest']
    df_above_1000000 = pd.DataFrame(
        1, index=pd.MultiIndex.from_product([[1, 2], range(500001)]),
        columns=['dest'])
    with pytest.raises(KeyError):
        df_above_1000000.loc[(-1, 0), 'dest']
    with pytest.raises(KeyError):
        df_above_1000000.loc[(3, 0), 'dest']


def test_million_record_attribute_error():
    # GH 18165: attribute errors on large MI-backed Series stay AttributeError.
    r = list(range(1000000))
    df = pd.DataFrame({'a': r, 'b': r},
                      index=pd.MultiIndex.from_tuples([(x, x) for x in r]))
    msg = "'Series' object has no attribute 'foo'"
    with pytest.raises(AttributeError, match=msg):
        df['a'].foo()
def test_can_hold_identifiers(idx):
    # A value present in the index can be used as an identifier-like key.
    key = idx[0]
    assert idx._can_hold_identifiers_and_holds_name(key) is True


def test_metadata_immutable(idx):
    levels, codes = idx.levels, idx.codes
    # shouldn't be able to set at either the top level or base level
    mutable_regex = re.compile('does not support mutable operations')
    with pytest.raises(TypeError, match=mutable_regex):
        levels[0] = levels[0]
    with pytest.raises(TypeError, match=mutable_regex):
        levels[0][0] = levels[0][0]
    # ditto for labels
    with pytest.raises(TypeError, match=mutable_regex):
        codes[0] = codes[0]
    with pytest.raises(TypeError, match=mutable_regex):
        codes[0][0] = codes[0][0]
    # and for names
    names = idx.names
    with pytest.raises(TypeError, match=mutable_regex):
        names[0] = names[0]


def test_level_setting_resets_attributes():
    ind = pd.MultiIndex.from_arrays([
        ['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]
    ])
    assert ind.is_monotonic
    # Reordering level values in place must invalidate cached properties.
    ind.set_levels([['A', 'B'], [1, 3, 2]], inplace=True)
    # if this fails, probably didn't reset the cache correctly.
    assert not ind.is_monotonic
def test_rangeindex_fallback_coercion_bug():
    # GH 12893: stacking RangeIndex-backed frames must not corrupt the MI.
    foo = pd.DataFrame(np.arange(100).reshape((10, 10)))
    bar = pd.DataFrame(np.arange(100).reshape((10, 10)))
    df = pd.concat({'foo': foo.stack(), 'bar': bar.stack()}, axis=1)
    df.index.names = ['fizz', 'buzz']
    # repr must not raise
    str(df)
    expected = pd.DataFrame({'bar': np.arange(100),
                             'foo': np.arange(100)},
                            index=pd.MultiIndex.from_product(
                                [range(10), range(10)],
                                names=['fizz', 'buzz']))
    tm.assert_frame_equal(df, expected, check_like=True)
    result = df.index.get_level_values('fizz')
    expected = pd.Int64Index(np.arange(10), name='fizz').repeat(10)
    tm.assert_index_equal(result, expected)
    result = df.index.get_level_values('buzz')
    expected = pd.Int64Index(np.tile(np.arange(10), 10), name='buzz')
    tm.assert_index_equal(result, expected)


def test_hash_error(indices):
    # Indexes are unhashable; the error names the concrete type.
    index = indices
    with pytest.raises(TypeError, match=("unhashable type: %r" %
                                         type(index).__name__)):
        hash(indices)


def test_mutability(indices):
    # Item assignment on any non-empty index must raise.
    if not len(indices):
        return
    pytest.raises(TypeError, indices.__setitem__, 0, indices[0])


def test_wrong_number_names(indices):
    # Setting the wrong number of names must raise a length error.
    with pytest.raises(ValueError, match="^Length"):
        indices.names = ["apple", "banana", "carrot"]


def test_memory_usage(idx):
    result = idx.memory_usage()
    if len(idx):
        # A lookup populates the engine, which should grow reported memory.
        idx.get_loc(idx[0])
        result2 = idx.memory_usage()
        result3 = idx.memory_usage(deep=True)

        # RangeIndex, IntervalIndex
        # don't have engines
        if not isinstance(idx, (RangeIndex, IntervalIndex)):
            assert result2 > result

        if idx.inferred_type == 'object':
            assert result3 > result2
    else:
        # we report 0 for no-length
        assert result == 0


def test_nlevels(idx):
    # The shared fixture index has exactly two levels.
    assert idx.nlevels == 2
|
|
"""
Javascript code printer
The JavascriptCodePrinter converts single sympy expressions into single
Javascript expressions, using the functions defined in the Javascript
Math object where possible.
"""
from __future__ import print_function, division
from sympy.core import S, C
from sympy.printing.codeprinter import CodePrinter, Assignment
from sympy.printing.precedence import precedence
from sympy.core.compatibility import string_types
# dictionary mapping sympy function to (argument_conditions, Javascript_function).
# Used in JavascriptCodePrinter._print_Function(self)
# NOTE: every entry here is a plain target name (no argument conditions).
known_functions = {
    'Abs': 'Math.abs',
    'sin': 'Math.sin',
    'cos': 'Math.cos',
    'tan': 'Math.tan',
    'acos': 'Math.acos',
    'asin': 'Math.asin',
    'atan': 'Math.atan',
    'atan2': 'Math.atan2',
    'ceiling': 'Math.ceil',
    'floor': 'Math.floor',
    'sign': 'Math.sign',
    'exp': 'Math.exp',
    'log': 'Math.log',
}
class JavascriptCodePrinter(CodePrinter):
""""A Printer to convert python expressions to strings of javascript code
"""
printmethod = '_javascript'
language = 'Javascript'
_default_settings = {
'order': None,
'full_prec': 'auto',
'precision': 15,
'user_functions': {},
'human': True,
'contract': True
}
    def __init__(self, settings={}):
        # NOTE(review): mutable default argument kept to preserve the public
        # signature; `settings` is only read here, never mutated.
        CodePrinter.__init__(self, settings)
        # Start from the module-level table, then overlay user overrides.
        self.known_functions = dict(known_functions)
        userfuncs = settings.get('user_functions', {})
        self.known_functions.update(userfuncs)
    def _rate_index_position(self, p):
        # Weight used by the base printer when ordering loop indices.
        return p*5

    def _get_statement(self, codestring):
        # Terminate a statement with a semicolon.
        return "%s;" % codestring

    def _get_comment(self, text):
        # Render a single-line Javascript comment.
        return "// {0}".format(text)

    def _declare_number_const(self, name, value):
        # Emit a numeric constant as a `var` declaration.
        return "var {0} = {1};".format(name, value)

    def _format_code(self, lines):
        # Final formatting pass: re-indent the generated lines.
        return self.indent_code(lines)
    def _traverse_matrix_indices(self, mat):
        # Row-major traversal of all (i, j) positions of the matrix.
        rows, cols = mat.shape
        return ((i, j) for i in range(rows) for j in range(cols))

    def _get_loop_opening_ending(self, indices):
        """Return matching lists of loop-opening and loop-closing code lines
        for the given index objects."""
        open_lines = []
        close_lines = []
        loopstart = "for (var %(varble)s=%(start)s; %(varble)s<%(end)s; %(varble)s++){"
        for i in indices:
            # Javascript arrays start at 0 and end at dimension-1
            open_lines.append(loopstart % {
                'varble': self._print(i.label),
                'start': self._print(i.lower),
                'end': self._print(i.upper + 1)})
            close_lines.append("}")
        return open_lines, close_lines
    def _print_Pow(self, expr):
        PREC = precedence(expr)
        if expr.exp == -1:
            # x**-1 prints as a reciprocal rather than Math.pow(x, -1).
            return '1/%s' % (self.parenthesize(expr.base, PREC))
        elif expr.exp == 0.5:
            # Exponents equal to one half (square roots) use Math.sqrt.
            return 'Math.sqrt(%s)' % self._print(expr.base)
        else:
            return 'Math.pow(%s, %s)' % (self._print(expr.base),
                                         self._print(expr.exp))

    def _print_Rational(self, expr):
        # Print as an explicit integer division so the source stays exact.
        p, q = int(expr.p), int(expr.q)
        return '%d/%d' % (p, q)
    def _print_Indexed(self, expr):
        # calculate index for 1d array:
        # flatten the multi-dimensional index into a row-major linear offset.
        dims = expr.shape
        elem = S.Zero
        offset = S.One
        for i in reversed(range(expr.rank)):
            elem += expr.indices[i]*offset
            offset *= dims[i]
        return "%s[%s]" % (self._print(expr.base.label), self._print(elem))

    def _print_Idx(self, expr):
        # An index variable prints as its bare label.
        return self._print(expr.label)

    def _print_Exp1(self, expr):
        # Euler's number.
        return "Math.E"

    def _print_Pi(self, expr):
        return 'Math.PI'

    def _print_Infinity(self, expr):
        return 'Number.POSITIVE_INFINITY'

    def _print_NegativeInfinity(self, expr):
        return 'Number.NEGATIVE_INFINITY'
def _print_Piecewise(self, expr):
if expr.args[-1].cond != True:
# We need the last conditional to be a True, otherwise the resulting
# function may not return a result.
raise ValueError("All Piecewise expressions must contain an "
"(expr, True) statement to be used as a default "
"condition. Without one, the generated "
"expression may not evaluate to anything under "
"some condition.")
lines = []
if expr.has(Assignment):
for i, (e, c) in enumerate(expr.args):
if i == 0:
lines.append("if (%s) {" % self._print(c))
elif i == len(expr.args) - 1 and c == True:
lines.append("else {")
else:
lines.append("else if (%s) {" % self._print(c))
code0 = self._print(e)
lines.append(code0)
lines.append("}")
return "\n".join(lines)
else:
# The piecewise was used in an expression, need to do inline
# operators. This has the downside that inline operators will
# not work for statements that span multiple lines (Matrix or
# Indexed expressions).
ecpairs = ["((%s) ? (\n%s\n)\n" % (self._print(c), self._print(e))
for e, c in expr.args[:-1]]
last_line = ": (\n%s\n)" % self._print(expr.args[-1].expr)
return ": ".join(ecpairs) + last_line + " ".join([")"*len(ecpairs)])
def _print_MatrixElement(self, expr):
return "{0}[{1}]".format(expr.parent, expr.j +
expr.i*expr.parent.shape[1])
def indent_code(self, code):
"""Accepts a string of code or a list of code lines"""
if isinstance(code, string_types):
code_lines = self.indent_code(code.splitlines(True))
return ''.join(code_lines)
tab = " "
inc_token = ('{', '(', '{\n', '(\n')
dec_token = ('}', ')')
code = [ line.lstrip(' \t') for line in code ]
increase = [ int(any(map(line.endswith, inc_token))) for line in code ]
decrease = [ int(any(map(line.startswith, dec_token)))
for line in code ]
pretty = []
level = 0
for n, line in enumerate(code):
if line == '' or line == '\n':
pretty.append(line)
continue
level -= decrease[n]
pretty.append("%s%s" % (tab*level, line))
level += increase[n]
return pretty
def jscode(expr, assign_to=None, **settings):
    """Converts an expr to a string of javascript code

    Parameters
    ==========

    expr : Expr
        A sympy expression to be converted.
    assign_to : optional
        When given, the argument is used as the name of the variable to which
        the expression is assigned. Can be a string, ``Symbol``,
        ``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of
        line-wrapping, or for expressions that generate multi-line statements.
    precision : integer, optional
        The precision for numbers such as pi [default=15].
    user_functions : dict, optional
        A dictionary where keys are ``FunctionClass`` instances and values are
        their string representations. Alternatively, the dictionary value can
        be a list of tuples i.e. [(argument_test, js_function_string)]. See
        below for examples.
    human : bool, optional
        If True, the result is a single string that may contain some constant
        declarations for the number symbols. If False, the same information is
        returned in a tuple of (symbols_to_declare, not_supported_functions,
        code_text). [default=True].
    contract: bool, optional
        If True, ``Indexed`` instances are assumed to obey tensor contraction
        rules and the corresponding nested loops over indices are generated.
        Setting contract=False will not generate loops, instead the user is
        responsible to provide values for the indices in the code.
        [default=True].

    Examples
    ========

    >>> from sympy import jscode, symbols, Rational, sin, ceiling, Abs
    >>> x, tau = symbols("x, tau")
    >>> jscode((2*tau)**Rational(7, 2))
    '8*Math.sqrt(2)*Math.pow(tau, 7/2)'
    >>> jscode(sin(x), assign_to="s")
    's = Math.sin(x);'

    Custom printing can be defined for certain types by passing a dictionary of
    "type" : "function" to the ``user_functions`` kwarg. Alternatively, the
    dictionary value can be a list of tuples i.e. [(argument_test,
    js_function_string)].

    >>> custom_functions = {
    ...   "ceiling": "CEIL",
    ...   "Abs": [(lambda x: not x.is_integer, "fabs"),
    ...           (lambda x: x.is_integer, "ABS")]
    ... }
    >>> jscode(Abs(x) + ceiling(x), user_functions=custom_functions)
    'fabs(x) + CEIL(x)'

    ``Piecewise`` expressions are converted into conditionals. If an
    ``assign_to`` variable is provided an if statement is created, otherwise
    the ternary operator is used. Note that if the ``Piecewise`` lacks a
    default term, represented by ``(expr, True)`` then an error will be thrown.
    This is to prevent generating an expression that may not evaluate to
    anything.

    >>> from sympy import Piecewise
    >>> expr = Piecewise((x + 1, x > 0), (x, True))
    >>> print(jscode(expr, tau))
    if (x > 0) {
    tau = x + 1;
    }
    else {
    tau = x;
    }

    Support for loops is provided through ``Indexed`` types. With
    ``contract=True`` these expressions will be turned into loops, whereas
    ``contract=False`` will just print the assignment expression that should be
    looped over:

    >>> from sympy import Eq, IndexedBase, Idx
    >>> len_y = 5
    >>> y = IndexedBase('y', shape=(len_y,))
    >>> t = IndexedBase('t', shape=(len_y,))
    >>> Dy = IndexedBase('Dy', shape=(len_y-1,))
    >>> i = Idx('i', len_y-1)
    >>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
    >>> jscode(e.rhs, assign_to=e.lhs, contract=False)
    'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);'

    Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions
    must be provided to ``assign_to``. Note that any expression that can be
    generated normally can also exist inside a Matrix:

    >>> from sympy import Matrix, MatrixSymbol
    >>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)])
    >>> A = MatrixSymbol('A', 3, 1)
    >>> print(jscode(mat, A))
    A[0] = Math.pow(x, 2);
    if (x > 0) {
    A[1] = x + 1;
    }
    else {
    A[1] = x;
    }
    A[2] = Math.sin(x);
    """
    printer = JavascriptCodePrinter(settings)
    return printer.doprint(expr, assign_to)
def print_jscode(expr, **settings):
    """Prints the Javascript representation of the given expression.

    See jscode for the meaning of the optional arguments.
    """
    code = jscode(expr, **settings)
    print(code)
|
|
import pytest
from ate.ate import Template, ParseContext
from ate.tags import CompileStatement
from ate.tags import TextNode
from ate.tags import ExpressionNode
from ate.tags import BlockStatementNode
from ate.tags import MainNode
from ate.tags import ForBlockStatementNode
from ate.tags import CommentNode
from ate.tags import parse_expression
from ate.exceptions import ExpressionNotClosed
class TestMyTpl:
    """Parsing tests for ``Template`` / ``CompileStatement``."""

    def test_empty(self):
        template = Template("")
        assert isinstance(template.mainnode, MainNode)
        assert len(template.mainnode.nodes) == 0

    def test_multiline(self):
        # Round-trip of a plain multi-line template into a single TextNode.
        tpl = """Hello
world
bye!"""
        template = Template(tpl)
        assert isinstance(template.mainnode, MainNode)
        assert isinstance(template.mainnode.nodes[0], TextNode)
        assert template.mainnode.nodes[0].text == tpl

    def test_statement(self):
        tpl = "{{hello}}"
        res, skip = CompileStatement(ParseContext(tpl))
        assert skip == len(tpl)
        assert isinstance(res, ExpressionNode)
        assert res.expression == "hello"

    def test_block(self):
        tpl = "{% for i in abc%} x {% endfor %}"
        res, skip = CompileStatement(ParseContext(tpl))
        assert skip == len(tpl)
        assert isinstance(res, BlockStatementNode)
        assert res.type == "for"
        assert res.expression == "i in abc"
        assert res.code == tpl
        assert len(res.nodes) == 1
        assert isinstance(res.nodes[0], TextNode)
        assert res.nodes[0].text == " x "

    def test_complex(self):
        # Text / block / text sandwich.
        tpl = """Hello World
{% for i in abc%}
x {{i}}
{% endfor %}
That's all!"""
        t = Template(tpl)
        assert len(t.mainnode.nodes) == 3
        nodes = t.mainnode.nodes
        assert isinstance(nodes[0], TextNode)
        assert isinstance(nodes[1], BlockStatementNode)
        assert isinstance(nodes[2], TextNode)

    def test_closing_spacing(self):
        tn = ForBlockStatementNode("for")
        index = tn.compile(ParseContext("{%for%}{%endfor%}"), len("{%for%}"))
        assert index == 17

    def test_closing_spacing2(self):
        tn = ForBlockStatementNode("for")
        index = tn.compile(ParseContext("{%for%}{% endfor%}"), len("{%for%}"))
        assert index == 18

    def test_closing_spacing3(self):
        # NOTE(review): template literal is identical to test_closing_spacing2
        # but expects a different index (19 vs 18) -- whitespace inside the
        # literal was probably lost; restore from upstream before relying on
        # this test.
        tn = ForBlockStatementNode("for")
        index = tn.compile(ParseContext("{%for%}{% endfor%}"), len("{%for%}"))
        assert index == 19

    def test_closing_spacing4(self):
        tn = ForBlockStatementNode("for")
        index = tn.compile(ParseContext("{%for%}{%endfor %}"), len("{%for%}"))
        assert index == 18

    def test_closing_spacing5(self):
        # NOTE(review): identical literal to test_closing_spacing4 but expects
        # 19 instead of 18 -- see the note on test_closing_spacing3.
        tn = ForBlockStatementNode("for")
        index = tn.compile(ParseContext("{%for%}{%endfor %}"), len("{%for%}"))
        assert index == 19

    def test_closing_spacing6(self):
        # NOTE(review): the template shown is 19 characters long yet the
        # expected index is 21 -- two characters (likely extra spaces) seem
        # to be missing from the literal; confirm against upstream.
        tn = ForBlockStatementNode("for")
        index = tn.compile(ParseContext(
            "{%for%}{% endfor %}"), len("{%for%}"))
        assert index == 21

    def test_close_marker_in_expr(self):
        # A closing marker inside a string literal must not end the block.
        tpl = "{% for i in '%}' %} x {% endfor %}"
        res, skip = CompileStatement(ParseContext(tpl))
        assert res.expression == "i in '%}'"

    def test_comment_simple(self):
        tpl = "{# hello world #}"
        res, skip = CompileStatement(ParseContext(tpl))
        assert isinstance(res, CommentNode)
        assert res.expression == " hello world "

    def test_comment_broken_statement(self):
        # Unfinished statements inside comments are ignored.
        tpl = "{# {% for #}"
        res, skip = CompileStatement(ParseContext(tpl))
        assert isinstance(res, CommentNode)
        assert res.expression == " {% for "

    def test_comment_statement(self):
        # Complete statements inside comments stay uncompiled text.
        tpl = """{# {% for i in '123' %}
{{i}}
{%endfor%} #}"""
        res, skip = CompileStatement(ParseContext(tpl))
        assert isinstance(res, CommentNode)
        assert res.expression == """ {% for i in '123' %}
{{i}}
{%endfor%} """

    def test_comment_expression(self):
        tpl = "{# {{'hello'}} #}"
        res, skip = CompileStatement(ParseContext(tpl))
        assert isinstance(res, CommentNode)
        assert res.expression == " {{'hello'}} "

    def test_css(self):
        # Bare braces in CSS must not be mistaken for template markers.
        tpl = """<html>
<style>
p, th, td {
font-family: 'Open Sans', sans-serif;
}</style>
</html>
"""
        template = Template(tpl)
        assert isinstance(template.mainnode, MainNode)
        assert isinstance(template.mainnode.nodes[0], TextNode)
        assert template.mainnode.nodes[0].text == tpl

    def test_accolade(self):
        # Stray single braces are plain text; only {% %} / {{ }} are markers.
        tpl = """if {} { {! {% for i in '123' %}{{i}}{%endfor%} { }} {="""
        template = Template(tpl)
        assert isinstance(template.mainnode.nodes[0], TextNode)
        assert template.mainnode.nodes[0].text == "if {} { {! "
        assert isinstance(template.mainnode.nodes[1], ForBlockStatementNode)
        assert isinstance(template.mainnode.nodes[2], TextNode)
        assert template.mainnode.nodes[2].text == " { }} {="
class TestTemplateRender:
    """Rendering a parsed template into nested output fragments."""

    def test_render_text(self):
        source = (
            "Hello World\n\n"
            "{% for i in abc%}\n"
            " x {{i}}\n"
            "{% endfor %}\n\n"
            "That's all!"
        )
        template = Template(source)
        fragments = template.render_nested(i=1, abc=[1])
        assert fragments[0] == "Hello World\n\n"
        assert fragments[1][0] == "\n x "
        assert fragments[1][2] == "\n"
        assert fragments[2] == "\n\nThat's all!"
class TestStatementEval:
    """Evaluation of ``{{ ... }}`` expressions during rendering."""

    def test_interpolation(self):
        template = Template("{{i}}")
        cases = ((10, "10"), (0, "0"), (-10, "-10"), ("hello", "hello"))
        for value, expected in cases:
            assert template.render(i=value) == expected

    def test_expression(self):
        template = Template("{{i + j}}")
        assert template.render(i=10, j=5) == "15"
        assert template.render(i="Hello", j="World") == "HelloWorld"

    def test_string_expression(self):
        assert Template("{{'hello'}}").render() == 'hello'

    def test_string_expression_specialx(self):
        # Marker characters inside string literals are emitted verbatim.
        assert Template("{{'{{hello}}'}}").render() == '{{hello}}'

    def test_string_expression_special2(self):
        assert Template("{{'{%hello%}'}}").render() == '{%hello%}'
class TestExpressionParser:
    """Low-level tokenisation via ``parse_expression``."""

    def test_simple(self):
        text, pos = parse_expression("{{123}}")
        assert (text, pos) == ("123", 7)

    def test_string_single(self):
        text, pos = parse_expression("{{'hello'}}")
        assert (text, pos) == ("'hello'", 11)

    def test_string_double(self):
        text, pos = parse_expression('{{"hello"}}')
        assert (text, pos) == ('"hello"', 11)

    def test_string_escape_single(self):
        text, pos = parse_expression(r"{{'hel\'lo'}}")
        assert (text, pos) == (r"'hel\'lo'", 13)

    def test_string_escape_double(self):
        text, pos = parse_expression(r'{{"hel\"lo"}}')
        assert (text, pos) == (r'"hel\"lo"', 13)

    def test_string_escape_mix(self):
        text, pos = parse_expression(r'{{"hel\"\'lo"}}')
        assert (text, pos) == (r'"hel\"\'lo"', 15)

    def test_string_closing_marker(self):
        # A closing marker inside a string literal is part of the expression.
        text, pos = parse_expression("{{'}}'}}")
        assert (text, pos) == ("'}}'", 8)

    def test_trailing(self):
        # Parsing stops at the first closing marker; the rest is untouched.
        text, pos = parse_expression("{{123}} whatever")
        assert (text, pos) == ("123", 7)

    def test_close_error(self):
        with pytest.raises(ExpressionNotClosed):
            parse_expression("{{123}")

    def test_close_missing(self):
        with pytest.raises(ExpressionNotClosed):
            parse_expression("{{123")

    def test_handlebars_in_exp(self):
        """ This is valid, it's up to simpleeval to choke on it.
        We might reject it explicitly, perhaps?
        """
        text, pos = parse_expression("{{ {{ }}")
        assert (text, pos) == (" {{ ", 8)

    def test_handlebars_in_remainder(self):
        """ This is valid, the remainder will be treated as
        ordinary text since }} has no special meaning if not
        preceeded by {{
        """
        text, pos = parse_expression("{{ }} }}")
        assert (text, pos) == (" ", 5)
|
|
# Filename: hdf5.py
# pylint: disable=C0103,R0903,C901
# vim:set ts=4 sts=4 sw=4 et:
"""
Read and write KM3NeT-formatted HDF5 files.
"""
from collections import OrderedDict, defaultdict, namedtuple
from functools import singledispatch
import os.path
import warnings
from uuid import uuid4
import numpy as np
import tables as tb
import km3io
from thepipe import Provenance
try:
    from numba import jit
except ImportError:
    # numba is an optional accelerator; without it, fall back to a no-op
    # decorator so ``@jit``-decorated functions still work (uncompiled).
    # Fix: use a proper ``def`` instead of ``jit = lambda f: f`` (PEP 8
    # E731) so the fallback has a name in tracebacks and a docstring.
    def jit(f):
        """Identity decorator used when numba is not installed."""
        return f
import km3pipe as kp
from thepipe import Module, Blob
from km3pipe.dataclasses import Table, NDArray
from km3pipe.logger import get_logger
from km3pipe.tools import decamelise, camelise, split, istype
log = get_logger(__name__) # pylint: disable=C0103
__author__ = "Tamas Gal and Moritz Lotze and Michael Moser"
__copyright__ = "Copyright 2016, Tamas Gal and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "Tamas Gal and Moritz Lotze"
__email__ = "tgal@km3net.de"
__status__ = "Development"
# KM3HDF5 format version written to new files, and the minimum version
# accepted when reading.
# NOTE(review): ``np.string_`` is a deprecated alias (removed in NumPy 2.0);
# consider migrating to ``np.bytes_`` -- confirm NumPy version constraints.
FORMAT_VERSION = np.string_("5.1")
MINIMUM_FORMAT_VERSION = np.string_("4.1")
class H5VersionError(Exception):
    """Raised when an HDF5 file's format version is older than supported."""

    pass
def check_version(h5file):
    """Ensure *h5file* uses a supported KM3HDF5 format version.

    Parameters
    ----------
    h5file : tables.File
        An open PyTables file.

    Raises
    ------
    H5VersionError
        If the file's ``format_version`` root attribute is older than
        ``MINIMUM_FORMAT_VERSION``.  A *missing* attribute is only logged.
    """
    try:
        version = np.string_(h5file.root._v_attrs.format_version)
    except AttributeError:
        # Fix: the two adjacent string literals concatenated without a
        # separating space ("...'%s'.You may...").  Also pass the filename
        # as a lazy logging argument instead of eager %-formatting.
        log.error(
            "Could not determine HDF5 format version: '%s'. "
            "You may encounter unexpected errors! Good luck...",
            h5file.filename,
        )
        return
    if split(version, int, np.string_(".")) < split(
        MINIMUM_FORMAT_VERSION, int, np.string_(".")
    ):
        raise H5VersionError(
            "HDF5 format version {0} or newer required!\n"
            "'{1}' has HDF5 format version {2}.".format(
                MINIMUM_FORMAT_VERSION.decode("utf-8"),
                h5file.filename,
                version.decode("utf-8"),
            )
        )
class HDF5Header(object):
    """Wrapper class for the `/raw_header` table in KM3HDF5

    Parameters
    ----------
    data : dict(str, str/tuple/dict/OrderedDict)
        The actual header data, consisting of a key and an entry.
        If possible, the key will be set as a property and the the values will
        be converted to namedtuples (fields sorted by name to ensure consistency
        when dictionaries are provided).
    """

    def __init__(self, data):
        self._data = data
        self._user_friendly_data = {}  # namedtuples, if possible
        self._set_attributes()

    def _set_attributes(self):
        """Traverse the internal dictionary and set the getters"""
        for parameter in list(self._data.keys()):
            data = self._data[parameter]
            if isinstance(data, dict) or isinstance(data, OrderedDict):
                if not all(f.isidentifier() for f in data.keys()):
                    # NOTE(review): ``break`` aborts the *whole* loop, so every
                    # parameter after the first one with non-identifier field
                    # names is silently skipped -- ``continue`` looks like the
                    # intent; confirm against upstream before changing.
                    break
                # Create a namedtuple for easier access
                field_names, field_values = zip(*data.items())
                # Sort fields by name so dict ordering never changes layout.
                sorted_indices = np.argsort(field_names)
                clsname = "HeaderEntry" if not parameter.isidentifier() else parameter
                nt = namedtuple(clsname, [field_names[i] for i in sorted_indices])
                data = nt(*[field_values[i] for i in sorted_indices])
            if parameter.isidentifier():
                setattr(self, parameter, data)
            self._user_friendly_data[parameter] = data

    def __getitem__(self, key):
        return self._user_friendly_data[key]

    def keys(self):
        return self._user_friendly_data.keys()

    def values(self):
        return self._user_friendly_data.values()

    def items(self):
        return self._user_friendly_data.items()

    @classmethod
    def from_table(cls, table):
        """Build a header from a dict-like table of encoded byte columns."""
        data = OrderedDict()
        for i in range(len(table)):
            parameter = table["parameter"][i].decode()
            field_names = table["field_names"][i].decode().split(" ")
            field_values = table["field_values"][i].decode().split(" ")
            if field_values == [""]:
                log.info("No value for parameter '{}'! Skipping...".format(parameter))
                continue
            dtypes = table["dtype"][i].decode()
            dtyped_values = []
            for dtype, value in zip(dtypes.split(" "), field_values):
                if dtype.startswith("a"):
                    # "a..." dtypes are fixed-width strings: keep as-is.
                    dtyped_values.append(value)
                else:
                    # NOTE(review): ``np.fromstring`` with ``sep`` is
                    # deprecated -- same pattern in from_aanet/from_pytable.
                    value = np.fromstring(value, dtype=dtype, sep=" ")[0]
                    dtyped_values.append(value)
            data[parameter] = OrderedDict(zip(field_names, dtyped_values))
        return cls(data)

    @classmethod
    def from_km3io(cls, header):
        """Build a header from a ``km3io.offline.Header`` instance."""
        if not isinstance(header, km3io.offline.Header):
            raise TypeError(
                "The given header object is not an instance of km3io.offline.Header"
            )
        return cls(header._data)

    @classmethod
    def from_aanet(cls, table):
        """Build a header from an aanet-style table (bytes columns)."""
        data = OrderedDict()
        for i in range(len(table)):
            parameter = table["parameter"][i].astype(str)
            field_names = [n.decode() for n in table["field_names"][i].split()]
            field_values = [n.decode() for n in table["field_values"][i].split()]
            if field_values in [[b""], []]:
                log.info("No value for parameter '{}'! Skipping...".format(parameter))
                continue
            dtypes = table["dtype"][i]
            dtyped_values = []
            for dtype, value in zip(dtypes.split(), field_values):
                if dtype.startswith(b"a"):
                    dtyped_values.append(value)
                else:
                    value = np.fromstring(value, dtype=dtype, sep=" ")[0]
                    dtyped_values.append(value)
            data[parameter] = OrderedDict(zip(field_names, dtyped_values))
        return cls(data)

    @classmethod
    def from_hdf5(cls, filename):
        """Read the header from the ``/raw_header`` node of an HDF5 file."""
        with tb.open_file(filename, "r") as f:
            table = f.get_node("/raw_header")
            return cls.from_pytable(table)

    @classmethod
    def from_pytable(cls, table):
        """Build a header from an open PyTables ``/raw_header`` table."""
        data = OrderedDict()
        for row in table:
            parameter = row["parameter"].decode()
            field_names = row["field_names"].decode().split(" ")
            field_values = row["field_values"].decode().split(" ")
            if field_values == [""]:
                log.info("No value for parameter '{}'! Skipping...".format(parameter))
                continue
            dtypes = row["dtype"].decode()
            dtyped_values = []
            for dtype, value in zip(dtypes.split(" "), field_values):
                if dtype.startswith("a"):
                    dtyped_values.append(value)
                else:
                    value = np.fromstring(value, dtype=dtype, sep=" ")[0]
                    dtyped_values.append(value)
            data[parameter] = OrderedDict(zip(field_names, dtyped_values))
        return cls(data)
class HDF5IndexTable:
    """Bookkeeping for ``.../_indices`` tables of variable-length data.

    Maintains two parallel columns: ``indices`` (offset of each group's
    first item) and ``n_items`` (number of items in that group).

    Parameters
    ----------
    h5loc : str
        HDF5 location of the index table.
    start : int
        Number of groups already processed before this table was created;
        they are back-filled as empty (0-length) entries so the table stays
        aligned with the group IDs.
    """

    def __init__(self, h5loc, start=0):
        self.h5loc = h5loc
        self._data = defaultdict(list)
        self._index = 0  # running offset of the next item
        if start > 0:
            self._data["indices"] = [0] * start
            self._data["n_items"] = [0] * start

    def append(self, n_items):
        """Record a group of *n_items* items starting at the current offset."""
        self._data["indices"].append(self._index)
        self._data["n_items"].append(n_items)
        self._index += n_items

    @property
    def data(self):
        return self._data

    def fillup(self, length):
        """Pad with 0-length entries until the table has *length* rows.

        Fix: the original unconditionally read ``indices[-1]`` and raised
        ``IndexError`` when called on a table that never saw an ``append``;
        an empty table is now padded starting from the current offset.
        """
        missing = length - len(self)
        if missing <= 0:
            return
        if self._data["indices"]:
            last_index = self._data["indices"][-1]
        else:
            last_index = self._index
        self._data["indices"] += [last_index] * missing
        self._data["n_items"] += [0] * missing

    def __len__(self):
        return len(self.data["indices"])
class HDF5Sink(Module):
    """Write KM3NeT-formatted HDF5 files, event-by-event.

    The data can be a ``kp.Table``, a numpy structured array,
    a pandas DataFrame, or a simple scalar.

    The name of the corresponding H5 table is the decamelised
    blob-key, so values which are stored in the blob under `FooBar`
    will be written to `/foo_bar` in the HDF5 file.

    Parameters
    ----------
    filename: str, optional [default: 'dump.h5']
        Where to store the events.
    h5file: pytables.File instance, optional [default: None]
        Opened file to write to. This is mutually exclusive with filename.
    keys: list of strings, optional
        List of Blob-keys to write, everything else is ignored.
    complib : str [default: zlib]
        Compression library that should be used.
        'zlib', 'lzf', 'blosc' and all other PyTables filters
        are available.
    complevel : int [default: 5]
        Compression level.
    chunksize : int [optional]
        Chunksize that should be used for saving along the first axis
        of the input array.
    flush_frequency: int, optional [default: 500]
        The number of iterations to cache tables and arrays before
        dumping to disk.
    pytab_file_args: dict [optional]
        pass more arguments to the pytables File init
    n_rows_expected : int, optional [default: 10000]
    append: bool, optional [default: False]
    reset_group_id: bool, optional [default: True]
        Resets the group_id so that it's continuous in the output file.
        Use this with care!

    Notes
    -----
    Provides service write_table(tab, h5loc=None): tab:Table, h5loc:str
        The table to write, with ".h5loc" set or to h5loc if specified.
    """

    def configure(self):
        self.filename = self.get("filename", default="dump.h5")
        self.ext_h5file = self.get("h5file")
        self.keys = self.get("keys", default=[])
        self.complib = self.get("complib", default="zlib")
        self.complevel = self.get("complevel", default=5)
        self.chunksize = self.get("chunksize")
        self.flush_frequency = self.get("flush_frequency", default=500)
        self.pytab_file_args = self.get("pytab_file_args", default=dict())
        self.file_mode = "a" if self.get("append") else "w"
        self.keep_open = self.get("keep_open")
        self._reset_group_id = self.get("reset_group_id", default=True)
        self.indices = {}  # to store HDF5IndexTables for each h5loc
        self._singletons_written = {}
        # magic 10000: this is the default of the "expectedrows" arg
        # from the tables.File.create_table() function
        # at least according to the docs
        # might be able to set to `None`, I don't know...
        self.n_rows_expected = self.get("n_rows_expected", default=10000)
        self.index = 0  # continuous group_id counter
        self._uuid = str(uuid4())

        self.expose(self.write_table, "write_table")

        if self.ext_h5file is not None:
            self.h5file = self.ext_h5file
        else:
            self.h5file = tb.open_file(
                self.filename,
                mode=self.file_mode,
                title="KM3NeT",
                **self.pytab_file_args,
            )
        Provenance().record_output(
            self.filename, uuid=self._uuid, comment="HDF5Sink output"
        )
        self.filters = tb.Filters(
            complevel=self.complevel,
            shuffle=True,
            fletcher32=True,
            complib=self.complib,
        )
        self._tables = OrderedDict()
        self._ndarrays = OrderedDict()
        self._ndarrays_cache = defaultdict(list)

    def _to_array(self, data, name=None):
        """Coerce scalars/DataFrames into something writable; None to skip."""
        if data is None:
            return
        if np.isscalar(data):
            self.log.debug("toarray: is a scalar")
            # Scalars become single-row tables under /misc.
            return Table(
                {name: np.asarray(data).reshape((1,))},
                h5loc="/misc/{}".format(decamelise(name)),
                name=name,
            )
        # NOTE(review): this tests for an attribute literally named "len",
        # not ``__len__`` -- the guard is probably never triggered; confirm.
        if hasattr(data, "len") and len(data) <= 0:  # a bit smelly ;)
            self.log.debug("toarray: data has no length")
            return
        # istype instead isinstance, to avoid heavy pandas import (hmmm...)
        if istype(data, "DataFrame"):  # noqa
            self.log.debug("toarray: pandas dataframe")
            data = Table.from_dataframe(data)
        return data

    def _cache_ndarray(self, arr):
        # NDArrays are buffered and written in batches (see flush()).
        self._ndarrays_cache[arr.h5loc].append(arr)

    def _write_ndarrays_cache_to_disk(self):
        """Writes all the cached NDArrays to disk and empties the cache"""
        for h5loc, arrs in self._ndarrays_cache.items():
            title = arrs[0].title
            chunkshape = (
                (self.chunksize,) + arrs[0].shape[1:]
                if self.chunksize is not None
                else None
            )

            arr = NDArray(np.concatenate(arrs), h5loc=h5loc, title=title)

            if h5loc not in self._ndarrays:
                loc, tabname = os.path.split(h5loc)
                # First batch for this location: create the EArray lazily.
                ndarr = self.h5file.create_earray(
                    loc,
                    tabname,
                    tb.Atom.from_dtype(arr.dtype),
                    (0,) + arr.shape[1:],
                    chunkshape=chunkshape,
                    title=title,
                    filters=self.filters,
                    createparents=True,
                )
                self._ndarrays[h5loc] = ndarr
            else:
                ndarr = self._ndarrays[h5loc]

            # for arr_length in (len(a) for a in arrs):
            #     self._record_index(h5loc, arr_length)

            ndarr.append(arr)

        self._ndarrays_cache = defaultdict(list)

    def write_table(self, table, h5loc=None):
        """Write a single table to the HDF5 file, exposed as a service"""
        self.log.debug("Writing table %s", table.name)
        if h5loc is None:
            h5loc = table.h5loc
        self._write_table(h5loc, table, table.name)

    def _write_table(self, h5loc, arr, title):
        """Append *arr* to the (lazily created) table at *h5loc*."""
        level = len(h5loc.split("/"))

        if h5loc not in self._tables:
            dtype = arr.dtype
            if any("U" in str(dtype.fields[f][0]) for f in dtype.fields):
                self.log.error(
                    "Cannot write data to '{}'. Unicode strings are not supported!".format(
                        h5loc
                    )
                )
                return
            loc, tabname = os.path.split(h5loc)
            self.log.debug(
                "h5loc '{}', Loc '{}', tabname '{}'".format(h5loc, loc, tabname)
            )
            with warnings.catch_warnings():
                # h5locs may contain characters PyTables warns about.
                warnings.simplefilter("ignore", tb.NaturalNameWarning)
                tab = self.h5file.create_table(
                    loc,
                    tabname,
                    chunkshape=self.chunksize,
                    description=dtype,
                    title=title,
                    filters=self.filters,
                    createparents=True,
                    expectedrows=self.n_rows_expected,
                )
            tab._v_attrs.datatype = title
            # Only shallow locations are cached (and thus flushed lazily).
            if level < 5:
                self._tables[h5loc] = tab
        else:
            tab = self._tables[h5loc]

        h5_colnames = set(tab.colnames)
        tab_colnames = set(arr.dtype.names)
        if h5_colnames != tab_colnames:
            missing_cols = h5_colnames - tab_colnames
            if missing_cols:
                self.log.info("Missing columns in table, trying to append NaNs.")
                arr = arr.append_columns(
                    missing_cols, np.full((len(missing_cols), len(arr)), np.nan)
                )
                if arr.dtype != tab.dtype:
                    self.log.error(
                        "Differing dtypes after appending "
                        "missing columns to the table! Skipping..."
                    )
                    return

        if arr.dtype != tab.dtype:
            # Last resort: try to cast to the dtype the table was created with.
            try:
                arr = Table(arr, dtype=tab.dtype)
            except ValueError:
                self.log.critical(
                    "Cannot write a table to '%s' since its dtype is "
                    "different compared to the previous table with the same "
                    "HDF5 location, which was used to fix the dtype of the "
                    "HDF5 compund type." % h5loc
                )
                raise

        tab.append(arr)

        if level < 4:
            tab.flush()

    def _write_separate_columns(self, where, obj, title):
        """Write each column of *obj* as its own EArray below *where*."""
        f = self.h5file
        loc, group_name = os.path.split(where)
        if where not in f:
            group = f.create_group(loc, group_name, title, createparents=True)
            group._v_attrs.datatype = title
        else:
            group = f.get_node(where)

        for col, (dt, _) in obj.dtype.fields.items():
            data = obj.__array__()[col]
            if col not in group:
                a = tb.Atom.from_dtype(dt)
                arr = f.create_earray(
                    group, col, a, (0,), col.capitalize(), filters=self.filters
                )
            else:
                arr = getattr(group, col)

            arr.append(data)

        # create index table
        # if where not in self.indices:
        #     self.indices[where] = HDF5IndexTable(where + "/_indices", start=self.index)

        self._record_index(where, len(data), split=True)

    def _process_entry(self, key, entry):
        """Write a single blob entry; return it if written, else None."""
        self.log.debug("Inspecting {}".format(key))
        if (
            hasattr(entry, "h5singleton")
            and entry.h5singleton
            and entry.h5loc in self._singletons_written
        ):
            self.log.debug(
                "Skipping '%s' since it's a singleton and already written."
                % entry.h5loc
            )
            return
        if not hasattr(entry, "h5loc"):
            self.log.debug("Ignoring '%s': no h5loc attribute" % key)
            return

        if isinstance(entry, NDArray):
            self._cache_ndarray(entry)
            self._record_index(entry.h5loc, len(entry))
            return entry

        try:
            title = entry.name
        except AttributeError:
            title = key

        if isinstance(entry, Table) and not entry.h5singleton:
            if "group_id" not in entry:
                entry = entry.append_columns("group_id", self.index)
            elif self._reset_group_id:
                # reset group_id to the HDF5Sink's continuous counter
                entry.group_id = self.index

        self.log.debug("h5l: '{}', title '{}'".format(entry.h5loc, title))

        if hasattr(entry, "split_h5") and entry.split_h5:
            self.log.debug("Writing into separate columns...")
            self._write_separate_columns(entry.h5loc, entry, title=title)
        else:
            self.log.debug("Writing into single Table...")
            self._write_table(entry.h5loc, entry, title=title)

        if hasattr(entry, "h5singleton") and entry.h5singleton:
            self._singletons_written[entry.h5loc] = True

        return entry

    def process(self, blob):
        """Write all eligible blob entries; return the blob unchanged."""
        written_blob = Blob()
        for key, entry in sorted(blob.items()):
            if self.keys and key not in self.keys:
                self.log.info("Skipping blob, since it's not in the keys list")
                continue
            self.log.debug("Processing %s", key)
            data = self._process_entry(key, entry)
            if data is not None:
                written_blob[key] = data

        if "GroupInfo" not in blob:
            # Synthesize group bookkeeping if the upstream didn't provide it.
            gi = Table(
                {"group_id": self.index, "blob_length": len(written_blob)},
                h5loc="/group_info",
                name="Group Info",
            )
            self._process_entry("GroupInfo", gi)

        # fill up NDArray indices with 0 entries if needed
        if written_blob:
            ndarray_h5locs = set(self._ndarrays.keys()).union(
                self._ndarrays_cache.keys()
            )
            written_h5locs = set(
                e.h5loc for e in written_blob.values() if isinstance(e, NDArray)
            )
            missing_h5locs = ndarray_h5locs - written_h5locs
            for h5loc in missing_h5locs:
                self.log.info("Filling up %s with 0 length entry", h5loc)
                self._record_index(h5loc, 0)

        # NOTE: this also fires at index 0 (flush before anything cached).
        if not self.index % self.flush_frequency:
            self.flush()

        self.index += 1
        return blob

    def _record_index(self, h5loc, count, split=False):
        """Add an index entry (optionally create table) for an NDArray h5loc.

        Parameters
        ----------
        h5loc : str
            location in HDF5
        count : int
            number of elements (can be 0)
        split : bool
            if it's a split table
        """
        suffix = "/_indices" if split else "_indices"
        idx_table_h5loc = h5loc + suffix
        if idx_table_h5loc not in self.indices:
            self.indices[idx_table_h5loc] = HDF5IndexTable(
                idx_table_h5loc, start=self.index
            )

        idx_tab = self.indices[idx_table_h5loc]
        idx_tab.append(count)

    def flush(self):
        """Flush tables and arrays to disk"""
        self.log.info("Flushing tables and arrays to disk...")
        for tab in self._tables.values():
            tab.flush()
        self._write_ndarrays_cache_to_disk()

    def finish(self):
        """Flush everything, write metadata/index tables and close the file."""
        self.flush()
        self.h5file.root._v_attrs.km3pipe = np.string_(kp.__version__)
        self.h5file.root._v_attrs.pytables = np.string_(tb.__version__)
        self.h5file.root._v_attrs.kid = np.string_(self._uuid)
        self.h5file.root._v_attrs.format_version = np.string_(FORMAT_VERSION)
        self.log.info("Adding index tables.")
        for where, idx_tab in self.indices.items():
            # any skipped NDArrays or split groups will be filled with 0 entries
            idx_tab.fillup(self.index)

            self.log.debug("Creating index table for '%s'" % where)
            h5loc = idx_tab.h5loc
            self.log.info("  -> {0}".format(h5loc))
            indices = Table(
                {"index": idx_tab.data["indices"], "n_items": idx_tab.data["n_items"]},
                h5loc=h5loc,
            )
            self._write_table(h5loc, indices, title="Indices")

        self.log.info(
            "Creating pytables index tables. " "This may take a few minutes..."
        )
        for tab in self._tables.values():
            if "frame_id" in tab.colnames:
                tab.cols.frame_id.create_index()
            if "slice_id" in tab.colnames:
                tab.cols.slice_id.create_index()
            if "dom_id" in tab.colnames:
                tab.cols.dom_id.create_index()
            if "event_id" in tab.colnames:
                try:
                    tab.cols.event_id.create_index()
                except NotImplementedError:
                    log.warning(
                        "Table '{}' has an uint64 column, "
                        "not indexing...".format(tab._v_name)
                    )
            if "group_id" in tab.colnames:
                try:
                    tab.cols.group_id.create_index()
                except NotImplementedError:
                    log.warning(
                        "Table '{}' has an uint64 column, "
                        "not indexing...".format(tab._v_name)
                    )
            tab.flush()

        if "HDF5MetaData" in self.services:
            self.log.info("Writing HDF5 meta data.")
            metadata = self.services["HDF5MetaData"]
            for name, value in metadata.items():
                self.h5file.set_node_attr("/", name, value)

        if not self.keep_open:
            self.h5file.close()
        self.cprint("HDF5 file written to: {}".format(self.filename))
class HDF5Pump(Module):
"""Read KM3NeT-formatted HDF5 files, event-by-event.
Parameters
----------
filename: str
From where to read events. Either this OR ``filenames`` needs to be
defined.
skip_version_check: bool [default: False]
Don't check the H5 version. Might lead to unintended consequences.
shuffle: bool, optional [default: False]
Shuffle the group_ids, so that the blobs are mixed up.
shuffle_function: function, optional [default: np.random.shuffle
The function to be used to shuffle the group IDs.
reset_index: bool, optional [default: True]
When shuffle is set to true, reset the group ID - start to count
the group_id by 0.
Notes
-----
Provides service h5singleton(h5loc): h5loc:str -> kp.Table
Singleton tables for a given HDF5 location.
"""
    def configure(self):
        """Open the input file, check its version and read the group info."""
        self.filename = self.get("filename")
        self.skip_version_check = self.get("skip_version_check", default=False)
        self.verbose = bool(self.get("verbose"))
        self.shuffle = self.get("shuffle", default=False)
        self.shuffle_function = self.get("shuffle_function", default=np.random.shuffle)
        # NOTE(review): the class docstring documents reset_index as
        # defaulting to True, but the code default is False -- confirm.
        self.reset_index = self.get("reset_index", default=False)

        self.h5file = None
        self.cut_mask = None
        self.indices = {}
        self._tab_indices = {}
        self._singletons = {}  # cache served via the h5singleton service
        self.header = None

        self.group_ids = None
        self._n_groups = None
        self.index = 0  # next group to be read by process()

        self.h5file = tb.open_file(self.filename, "r")
        Provenance().record_input(self.filename, comment="HDF5Pump input")

        if not self.skip_version_check:
            check_version(self.h5file)

        self._read_group_info()
        self.expose(self.h5singleton, "h5singleton")
def _read_group_info(self):
h5file = self.h5file
if "/group_info" not in h5file:
self.log.critical("Missing /group_info '%s', aborting..." % h5file.filename)
raise SystemExit
self.log.info("Reading group information from '/group_info'.")
group_info = h5file.get_node("/", "group_info")
self.group_ids = group_info.cols.group_id[:]
self._n_groups = len(self.group_ids)
if "/raw_header" in h5file:
self.log.info("Reading /raw_header")
try:
self.header = HDF5Header.from_pytable(h5file.get_node("/raw_header"))
except TypeError:
self.log.error("Could not parse the raw header, skipping!")
if self.shuffle:
self.log.info("Shuffling group IDs")
self.shuffle_function(self.group_ids)
def h5singleton(self, h5loc):
"""Returns the singleton table for a given HDF5 location"""
return self._singletons[h5loc]
def process(self, blob):
self.log.info("Reading blob at index %s" % self.index)
if self.index >= self._n_groups:
self.log.info("All groups are read.")
raise StopIteration
blob = self.get_blob(self.index)
self.index += 1
return blob
def get_blob(self, index):
blob = Blob()
group_id = self.group_ids[index]
# skip groups with separate columns
# and deal with them later
# this should be solved using hdf5 attributes in near future
split_table_locs = []
ndarray_locs = []
for tab in self.h5file.walk_nodes(classname="Table"):
h5loc = tab._v_pathname
loc, tabname = os.path.split(h5loc)
if tabname in self.indices:
self.log.info("index table '%s' already read, skip..." % h5loc)
continue
if loc in split_table_locs:
self.log.info("get_blob: '%s' is noted, skip..." % h5loc)
continue
if tabname == "_indices":
self.log.debug("get_blob: found index table '%s'" % h5loc)
split_table_locs.append(loc)
self.indices[loc] = self.h5file.get_node(h5loc)
continue
if tabname.endswith("_indices"):
self.log.debug("get_blob: found index table '%s' for NDArray" % h5loc)
ndarr_loc = h5loc.replace("_indices", "")
ndarray_locs.append(ndarr_loc)
if ndarr_loc in self.indices:
self.log.info(
"index table for NDArray '%s' already read, skip..." % ndarr_loc
)
continue
_index_table = self.h5file.get_node(h5loc)
self.indices[ndarr_loc] = {
"index": _index_table.col("index")[:],
"n_items": _index_table.col("n_items")[:],
}
continue
tabname = camelise(tabname)
if "group_id" in tab.dtype.names:
try:
if h5loc not in self._tab_indices:
self._read_tab_indices(h5loc)
tab_idx_start = self._tab_indices[h5loc][0][group_id]
tab_n_items = self._tab_indices[h5loc][1][group_id]
if tab_n_items == 0:
continue
arr = tab[tab_idx_start : tab_idx_start + tab_n_items]
except IndexError:
self.log.debug("No data for h5loc '%s'" % h5loc)
continue
except NotImplementedError:
# 64-bit unsigned integer columns like ``group_id``
# are not yet supported in conditions
self.log.debug(
"get_blob: found uint64 column at '{}'...".format(h5loc)
)
arr = tab.read()
arr = arr[arr["group_id"] == group_id]
except ValueError:
# "there are no columns taking part
# in condition ``group_id == 0``"
self.log.info(
"get_blob: no `%s` column found in '%s'! "
"skipping... " % ("group_id", h5loc)
)
continue
else:
if h5loc not in self._singletons:
log.info("Caching H5 singleton: {} ({})".format(tabname, h5loc))
self._singletons[h5loc] = Table(
tab.read(),
h5loc=h5loc,
split_h5=False,
name=tabname,
h5singleton=True,
)
blob[tabname] = self._singletons[h5loc]
continue
self.log.debug("h5loc: '{}'".format(h5loc))
tab = Table(arr, h5loc=h5loc, split_h5=False, name=tabname)
if self.shuffle and self.reset_index:
tab.group_id[:] = index
blob[tabname] = tab
# skipped locs are now column wise datasets (usually hits)
# currently hardcoded, in future using hdf5 attributes
# to get the right constructor
for loc in split_table_locs:
# if some events are missing (group_id not continuous),
# this does not work as intended
# idx, n_items = self.indices[loc][group_id]
idx = self.indices[loc].col("index")[group_id]
n_items = self.indices[loc].col("n_items")[group_id]
end = idx + n_items
node = self.h5file.get_node(loc)
columns = (c for c in node._v_children if c != "_indices")
data = {}
for col in columns:
data[col] = self.h5file.get_node(loc + "/" + col)[idx:end]
tabname = camelise(loc.split("/")[-1])
s_tab = Table(data, h5loc=loc, split_h5=True, name=tabname)
if self.shuffle and self.reset_index:
s_tab.group_id[:] = index
blob[tabname] = s_tab
if self.header is not None:
blob["Header"] = self.header
for ndarr_loc in ndarray_locs:
self.log.info("Reading %s" % ndarr_loc)
try:
idx = self.indices[ndarr_loc]["index"][group_id]
n_items = self.indices[ndarr_loc]["n_items"][group_id]
except IndexError:
continue
end = idx + n_items
ndarr = self.h5file.get_node(ndarr_loc)
ndarr_name = camelise(ndarr_loc.split("/")[-1])
_ndarr = NDArray(
ndarr[idx:end], h5loc=ndarr_loc, title=ndarr.title, group_id=group_id
)
if self.shuffle and self.reset_index:
_ndarr.group_id = index
blob[ndarr_name] = _ndarr
return blob
def _read_tab_indices(self, h5loc):
self.log.info("Reading table indices for '{}'".format(h5loc))
node = self.h5file.get_node(h5loc)
group_ids = None
if "group_id" in node.dtype.names:
group_ids = self.h5file.get_node(h5loc).cols.group_id[:]
else:
self.log.error("No data found in '{}'".format(h5loc))
return
self._tab_indices[h5loc] = create_index_tuple(group_ids)
def __len__(self):
self.log.info("Opening all HDF5 files to check the number of groups")
n_groups = 0
for filename in self.filenames:
with tb.open_file(filename, "r") as h5file:
group_info = h5file.get_node("/", "group_info")
self.group_ids = group_info.cols.group_id[:]
n_groups += len(self.group_ids)
return n_groups
def __iter__(self):
return self
def __next__(self):
# TODO: wrap that in self._check_if_next_file_is_needed(self.index)
if self.index >= self._n_groups:
self.log.info("All groups are read")
raise StopIteration
blob = self.get_blob(self.index)
self.index += 1
return blob
def __getitem__(self, index):
if isinstance(index, int):
return self.get_blob(index)
elif isinstance(index, slice):
return self._slice_generator(index)
else:
raise TypeError("index must be int or slice")
def _slice_generator(self, index):
"""A simple slice generator for iterations"""
start, stop, step = index.indices(len(self))
for i in range(start, stop, step):
yield self.get_blob(i)
self.filename = None
def _close_h5file(self):
if self.h5file:
self.h5file.close()
def finish(self):
self._close_h5file()
@jit
def create_index_tuple(group_ids):
    """Build (start_index, n_items) lookup arrays for fast group slicing.

    ``group_ids`` is expected to hold equal IDs in adjacent runs; the
    returned arrays are indexed by group ID.
    """
    largest_id = np.max(group_ids)
    starts = np.full(largest_id + 1, 0)
    counts = np.full(largest_id + 1, 0)
    active_id = group_ids[0]
    offset = 0
    run_length = 0
    for gid in group_ids:
        if gid != active_id:
            # Close the finished run before starting the next one.
            starts[active_id] = offset
            counts[active_id] = run_length
            offset += run_length
            run_length = 0
            active_id = gid
        run_length += 1
    # Record the final run.
    starts[active_id] = offset
    counts[active_id] = run_length
    return (starts, counts)
class HDF5MetaData(Module):
    """Metadata to attach to the HDF5 file.

    Parameters
    ----------
    data: dict
        Key/value pairs to be written as HDF5 attributes.
    """

    def configure(self):
        """Require the metadata dict and expose it as a service."""
        metadata = self.require("data")
        self.data = metadata
        self.expose(self.data, "HDF5MetaData")
@singledispatch
def header2table(data):
    """Convert a header to an `HDF5Header` compliant `kp.Table`.

    Fallback for unregistered input types: report the unsupported type
    and implicitly return ``None``.
    """
    unsupported_type = type(data)
    print(f"Unsupported header data of type {unsupported_type}")
@header2table.register(dict)
def _(header_dict):
    """Convert a plain header dict into the '/raw_header' Table layout."""
    if not header_dict:
        print("Empty header dictionary.")
        return
    tab_dict = defaultdict(list)
    for parameter, data in header_dict.items():
        fields = []
        values = []
        types = []
        for field_name, field_value in data.items():
            fields.append(field_name)
            values.append(str(field_value))
            # Probe whether the value is numeric; otherwise fall back to a
            # fixed-width string dtype sized for the value.
            try:
                _ = float(field_value)  # noqa
                types.append("f4")
            except ValueError:
                types.append("a{}".format(len(field_value)))
            except TypeError:  # e.g. values is None
                types.append("a{}".format(len(str(field_value))))
        tab_dict["parameter"].append(parameter.encode())
        tab_dict["field_names"].append(" ".join(fields).encode())
        tab_dict["field_values"].append(" ".join(values).encode())
        tab_dict["dtype"].append(" ".join(types).encode())
        log.debug(
            "{}: {} {} {}".format(
                tab_dict["parameter"][-1],
                tab_dict["field_names"][-1],
                tab_dict["field_values"][-1],
                tab_dict["dtype"][-1],
            )
        )
    return Table(tab_dict, h5loc="/raw_header", name="RawHeader", h5singleton=True)
@header2table.register(km3io.offline.Header)
def _(header):
    """Convert a ``km3io`` offline header to the raw-header table format."""
    converted = {}
    for parameter, entry in header._data.items():
        try:
            mapping = entry._asdict()
        except AttributeError:
            # Single entry without a further parameter name in the
            # specification.
            mapping = {parameter + "_0": entry}
        converted[parameter] = mapping
    return header2table(converted)
@header2table.register(HDF5Header)
def _(header):
    """Convert an ``HDF5Header`` by delegating to its underlying data dict."""
    return header2table(header._data)
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import operator
from oslo_log import log
from ceilometer.event.storage import base
from ceilometer.event.storage import models
from ceilometer.i18n import _LE
from ceilometer.storage.hbase import base as hbase_base
from ceilometer.storage.hbase import utils as hbase_utils
from ceilometer import utils
# Module-level logger for this storage driver.
LOG = log.getLogger(__name__)

# API capabilities implemented by this event storage driver; merged into
# the base connection's capability dict in Connection.CAPABILITIES below.
AVAILABLE_CAPABILITIES = {
    'events': {'query': {'simple': True}},
}

# Storage-level capabilities of the HBase backend.
AVAILABLE_STORAGE_CAPABILITIES = {
    'storage': {'production_ready': True},
}
class Connection(hbase_base.Connection, base.Connection):
    """Put the event data into a HBase database

    Collections:

    - events:

      - row_key: timestamp of event's generation + uuid of event
        in format: "%s:%s" % (ts, Event.message_id)
      - Column Families:

          f: contains the following qualifiers:

              - event_type: description of event's type
              - timestamp: time stamp of event generation
              - all traits for this event in format:

                .. code-block:: python

                  "%s:%s" % (trait_name, trait_type)
    """

    CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
                                       AVAILABLE_CAPABILITIES)
    STORAGE_CAPABILITIES = utils.update_nested(
        base.Connection.STORAGE_CAPABILITIES,
        AVAILABLE_STORAGE_CAPABILITIES,
    )
    _memory_instance = None

    # Name of the HBase table holding the events.
    EVENT_TABLE = "event"

    def __init__(self, url):
        super(Connection, self).__init__(url)

    def upgrade(self):
        # Create the event table with a single column family 'f' that keeps
        # only the latest version of each cell.
        tables = [self.EVENT_TABLE]
        column_families = {'f': dict(max_versions=1)}
        with self.conn_pool.connection() as conn:
            hbase_utils.create_tables(conn, tables, column_families)

    def clear(self):
        # Best-effort drop: disabling/deleting may fail if the table does
        # not exist, which is deliberately ignored.
        LOG.debug('Dropping HBase schema...')
        with self.conn_pool.connection() as conn:
            for table in [self.EVENT_TABLE]:
                try:
                    conn.disable_table(table)
                except Exception:
                    LOG.debug('Cannot disable table but ignoring error')
                try:
                    conn.delete_table(table)
                except Exception:
                    LOG.debug('Cannot delete table but ignoring error')

    def record_events(self, event_models):
        """Write the events to Hbase.

        :param event_models: a list of models.Event objects.
        """
        error = None
        with self.conn_pool.connection() as conn:
            events_table = conn.table(self.EVENT_TABLE)
            for event_model in event_models:
                # Row key consists of timestamp and message_id from
                # models.Event or purposes of storage event sorted by
                # timestamp in the database.
                ts = event_model.generated
                row = hbase_utils.prepare_key(
                    hbase_utils.timestamp(ts, reverse=False),
                    event_model.message_id)
                event_type = event_model.event_type
                traits = {}
                if event_model.traits:
                    for trait in event_model.traits:
                        key = hbase_utils.prepare_key(trait.name, trait.dtype)
                        traits[key] = trait.value
                record = hbase_utils.serialize_entry(traits,
                                                     event_type=event_type,
                                                     timestamp=ts,
                                                     raw=event_model.raw)
                try:
                    events_table.put(row, record)
                except Exception as ex:
                    LOG.exception(_LE("Failed to record event: %s") % ex)
                    error = ex
        # Attempt all events first, then re-raise the last failure.
        if error:
            raise error

    def get_events(self, event_filter, limit=None):
        """Return an iter of models.Event objects.

        :param event_filter: storage.EventFilter object, consists of filters
                             for events that are stored in database.
        """
        if limit == 0:
            return
        q, start, stop = hbase_utils.make_events_query_from_filter(
            event_filter)
        with self.conn_pool.connection() as conn:
            events_table = conn.table(self.EVENT_TABLE)
            gen = events_table.scan(filter=q, row_start=start, row_stop=stop,
                                    limit=limit)
            for event_id, data in gen:
                traits = []
                events_dict = hbase_utils.deserialize_entry(data)[0]
                for key, value in events_dict.items():
                    # Tuple keys encode (trait_name, trait_dtype).
                    if isinstance(key, tuple):
                        trait_name, trait_dtype = key
                        traits.append(models.Trait(name=trait_name,
                                                   dtype=int(trait_dtype),
                                                   value=value))
                # Row key format is "<timestamp>:<message_id>".
                ts, mess = event_id.split(':')
                yield models.Event(
                    message_id=hbase_utils.unquote(mess),
                    event_type=events_dict['event_type'],
                    generated=events_dict['timestamp'],
                    traits=sorted(traits,
                                  key=operator.attrgetter('dtype')),
                    raw=events_dict['raw']
                )

    def get_event_types(self):
        """Return all event types as an iterable of strings."""
        with self.conn_pool.connection() as conn:
            events_table = conn.table(self.EVENT_TABLE)
            gen = events_table.scan()
            event_types = set()
            for event_id, data in gen:
                events_dict = hbase_utils.deserialize_entry(data)[0]
                for key, value in events_dict.items():
                    # Non-tuple keys are metadata qualifiers; only
                    # 'event_type*' ones are of interest here.
                    if not isinstance(key, tuple) and key.startswith('event_type'):
                        if value not in event_types:
                            event_types.add(value)
                            yield value

    def get_trait_types(self, event_type):
        """Return a dictionary containing the name and data type of the trait.

        Only trait types for the provided event_type are returned.

        :param event_type: the type of the Event
        """
        q = hbase_utils.make_query(event_type=event_type)
        trait_names = set()
        with self.conn_pool.connection() as conn:
            events_table = conn.table(self.EVENT_TABLE)
            gen = events_table.scan(filter=q)
            for event_id, data in gen:
                events_dict = hbase_utils.deserialize_entry(data)[0]
                for key, value in events_dict.items():
                    if isinstance(key, tuple):
                        trait_name, trait_type = key
                        if trait_name not in trait_names:
                            # Here we check that our method return only unique
                            # trait types, for ex. if it is found the same trait
                            # types in different events with equal event_type,
                            # method will return only one trait type. It is
                            # proposed that certain trait name could have only one
                            # trait type.
                            trait_names.add(trait_name)
                            data_type = models.Trait.type_names[int(trait_type)]
                            yield {'name': trait_name, 'data_type': data_type}

    def get_traits(self, event_type, trait_type=None):
        """Return all trait instances associated with an event_type.

        If trait_type is specified, only return instances of that trait type.

        :param event_type: the type of the Event to filter by
        :param trait_type: the name of the Trait to filter by
        """
        q = hbase_utils.make_query(event_type=event_type,
                                   trait_type=trait_type)
        with self.conn_pool.connection() as conn:
            events_table = conn.table(self.EVENT_TABLE)
            gen = events_table.scan(filter=q)
            for event_id, data in gen:
                events_dict = hbase_utils.deserialize_entry(data)[0]
                for key, value in events_dict.items():
                    if isinstance(key, tuple):
                        trait_name, trait_type = key
                        yield models.Trait(name=trait_name,
                                           dtype=int(trait_type), value=value)
|
|
"""CoolMasterNet platform to control of CoolMasteNet Climate Devices."""
import logging
import voluptuous as vol
from homeassistant.components.climate import ClimateDevice, PLATFORM_SCHEMA
from homeassistant.components.climate.const import (
HVAC_MODE_OFF,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
CONF_HOST,
CONF_PORT,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
import homeassistant.helpers.config_validation as cv
# Climate entity features supported by this integration.
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE

# Default CoolMasterNet server port.
DEFAULT_PORT = 10102

# HVAC modes that may be offered via the config's `supported_modes`.
AVAILABLE_MODES = [
    HVAC_MODE_OFF,
    HVAC_MODE_HEAT,
    HVAC_MODE_COOL,
    HVAC_MODE_DRY,
    HVAC_MODE_HEAT_COOL,
    HVAC_MODE_FAN_ONLY,
]

# Mapping between CoolMasterNet mode strings and Home Assistant HVAC modes
# (and the reverse for sending commands).
CM_TO_HA_STATE = {
    "heat": HVAC_MODE_HEAT,
    "cool": HVAC_MODE_COOL,
    "auto": HVAC_MODE_HEAT_COOL,
    "dry": HVAC_MODE_DRY,
    "fan": HVAC_MODE_FAN_ONLY,
}
HA_STATE_TO_CM = {value: key for key, value in CM_TO_HA_STATE.items()}

# Fan speed strings understood by CoolMasterNet.
FAN_MODES = ["low", "med", "high", "auto"]

CONF_SUPPORTED_MODES = "supported_modes"

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
        vol.Optional(CONF_SUPPORTED_MODES, default=AVAILABLE_MODES): vol.All(
            cv.ensure_list, [vol.In(AVAILABLE_MODES)]
        ),
    }
)

_LOGGER = logging.getLogger(__name__)
def _build_entity(device, supported_modes):
    """Wrap a pycoolmasternet device in a CoolmasterClimate entity."""
    uid = device.uid
    _LOGGER.debug("Found device %s", uid)
    return CoolmasterClimate(device, supported_modes)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the CoolMasterNet climate platform."""
    # Imported lazily so the dependency is only needed when the platform
    # is actually configured.
    from pycoolmasternet import CoolMasterNet

    supported_modes = config.get(CONF_SUPPORTED_MODES)
    cool = CoolMasterNet(config[CONF_HOST], port=config[CONF_PORT])
    entities = [
        _build_entity(device, supported_modes) for device in cool.devices()
    ]
    add_entities(entities, True)
class CoolmasterClimate(ClimateDevice):
    """Representation of a coolmaster climate device."""

    def __init__(self, device, supported_modes):
        """Initialize the climate device."""
        self._device = device
        self._uid = device.uid
        self._hvac_modes = supported_modes
        self._hvac_mode = None
        self._target_temperature = None
        self._current_temperature = None
        self._current_fan_mode = None
        # NOTE(review): _current_operation is never read or updated
        # elsewhere in this class -- possibly dead state; confirm.
        self._current_operation = None
        self._on = None
        self._unit = None

    def update(self):
        """Pull state from CoolMasterNet."""
        status = self._device.status
        self._target_temperature = status["thermostat"]
        self._current_temperature = status["temperature"]
        self._current_fan_mode = status["fan_speed"]
        self._on = status["is_on"]
        device_mode = status["mode"]
        if self._on:
            self._hvac_mode = CM_TO_HA_STATE[device_mode]
        else:
            # A powered-off unit is reported as OFF regardless of its
            # configured mode.
            self._hvac_mode = HVAC_MODE_OFF
        if status["unit"] == "celsius":
            self._unit = TEMP_CELSIUS
        else:
            self._unit = TEMP_FAHRENHEIT

    @property
    def unique_id(self):
        """Return unique ID for this device."""
        return self._uid

    @property
    def supported_features(self):
        """Return the list of supported features."""
        return SUPPORT_FLAGS

    @property
    def name(self):
        """Return the name of the climate device."""
        return self.unique_id

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return self._unit

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._current_temperature

    @property
    def target_temperature(self):
        """Return the temperature we are trying to reach."""
        return self._target_temperature

    @property
    def hvac_mode(self):
        """Return hvac target hvac state."""
        return self._hvac_mode

    @property
    def hvac_modes(self):
        """Return the list of available operation modes."""
        return self._hvac_modes

    @property
    def fan_mode(self):
        """Return the fan setting."""
        return self._current_fan_mode

    @property
    def fan_modes(self):
        """Return the list of available fan modes."""
        return FAN_MODES

    def set_temperature(self, **kwargs):
        """Set new target temperatures."""
        temp = kwargs.get(ATTR_TEMPERATURE)
        if temp is not None:
            _LOGGER.debug("Setting temp of %s to %s", self.unique_id, str(temp))
            self._device.set_thermostat(str(temp))

    def set_fan_mode(self, fan_mode):
        """Set new fan mode."""
        _LOGGER.debug("Setting fan mode of %s to %s", self.unique_id, fan_mode)
        self._device.set_fan_speed(fan_mode)

    def set_hvac_mode(self, hvac_mode):
        """Set new operation mode."""
        _LOGGER.debug("Setting operation mode of %s to %s", self.unique_id, hvac_mode)
        if hvac_mode == HVAC_MODE_OFF:
            self.turn_off()
        else:
            # Switch the device mode first, then ensure it is powered on.
            self._device.set_mode(HA_STATE_TO_CM[hvac_mode])
            self.turn_on()

    def turn_on(self):
        """Turn on."""
        _LOGGER.debug("Turning %s on", self.unique_id)
        self._device.turn_on()

    def turn_off(self):
        """Turn off."""
        _LOGGER.debug("Turning %s off", self.unique_id)
        self._device.turn_off()
|
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import weakref
from collections.abc import Mapping
from flask import flash, g, has_request_context, request, session
from flask_wtf import FlaskForm
from wtforms import ValidationError
from wtforms.csrf.core import CSRF
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.fields.core import FieldList
from wtforms.form import FormMeta
from wtforms.widgets.core import HiddenInput
from indico.core import signals
from indico.core.auth import multipass
from indico.util.i18n import _
from indico.util.signals import values_from_signal
from indico.util.string import strip_whitespace
from indico.web.flask.util import url_for
from indico.web.util import get_request_user
class _DataWrapper:
"""Wrapper for the return value of generated_data properties."""
def __init__(self, data):
self.data = data
def __repr__(self):
return f'<DataWrapper({self.data!r})>'
class generated_data(property):
    """Property decorator for generated data in forms."""

    def __get__(self, obj, objtype=None):
        # Class-level access returns the descriptor itself (like `property`).
        if obj is None:
            return self
        getter = self.fget
        if getter is None:
            raise AttributeError('unreadable attribute')
        return _DataWrapper(getter(obj))
class IndicoFormMeta(FormMeta):
    """Metaclass that lets signal receivers inject extra fields into a form."""

    def __call__(cls, *args, **kwargs):
        # If we are instantiating a form that was just extended, don't
        # send the signal again - it's pointless to extend the extended
        # form and doing so could actually result in infinite recursion
        # if the signal receiver didn't specify a sender.
        if kwargs.pop('__extended', False):
            return super().__call__(*args, **kwargs)
        extra_fields = values_from_signal(signals.core.add_form_fields.send(cls))
        # If there are no extra fields, we don't need any custom logic
        # and simply create an instance of the original form.
        if not extra_fields:
            return super().__call__(*args, **kwargs)
        kwargs['__extended'] = True
        # Subclass on the fly and attach the extra fields under an
        # ``ext__`` prefix so they cannot clash with regular field names.
        ext_cls = type('_Extended' + cls.__name__, (cls,), {})
        for name, field in extra_fields:
            name = 'ext__' + name
            if hasattr(ext_cls, name):
                raise RuntimeError(f'Field name collision in {cls.__name__}: {name}')
            setattr(ext_cls, name, field)
        return ext_cls(*args, **kwargs)
class IndicoFormCSRF(CSRF):
    """CSRF implementation that validates against the session-bound token."""

    def generate_csrf_token(self, csrf_token_field):
        # The token is tied to the user's session rather than per-form.
        return session.csrf_token

    def validate_csrf_token(self, form, field):
        if field.current_token == field.data:
            return
        if not g.get('flashed_csrf_message'):
            # Only flash the message once per request. We may end up in here
            # multiple times if `validate()` is called more than once
            flash(_('It looks like there was a problem with your current session. Please submit the form again.'),
                  'error')
            g.flashed_csrf_message = True
        raise ValidationError(_('Invalid CSRF token'))
class IndicoForm(FlaskForm, metaclass=IndicoFormMeta):
    """Base form with CSRF handling, plugin-injected fields and AJAX support."""

    class Meta:
        csrf = True
        csrf_class = IndicoFormCSRF

        def bind_field(self, form, unbound_field, options):
            # We don't set default filters for query-based fields as it breaks them if no query_factory is set
            # while the Form is instantiated. Also, it's quite pointless for those fields...
            # FieldList simply doesn't support filters.
            no_filter_fields = (QuerySelectField, FieldList)
            filters = [strip_whitespace] if not issubclass(unbound_field.field_class, no_filter_fields) else []
            filters += unbound_field.kwargs.get('filters', [])
            bound = unbound_field.bind(form=form, filters=filters, **options)
            bound.get_form = weakref.ref(form)  # GC won't collect the form if we don't use a weakref
            return bound

    def __init__(self, *args, **kwargs):
        csrf_enabled = kwargs.pop('csrf_enabled', None)
        if has_request_context() and get_request_user()[1] in ('oauth', 'signed_url'):
            # no csrf checks needed since oauth/token/signature auth requires a secret that's not available
            # to a malicious site, and even if it was, they wouldn't have to use CSRF to abuse it.
            csrf_enabled = False
        if csrf_enabled is not None:
            # This is exactly what FlaskForm already does, but without
            # a deprecation warning.
            # Being able to set ``csrf_enabled=False`` is much nicer
            # than ``meta={'csrf': False}`` and if we ever need to
            # change it for some reason we can always replace it everywhere
            kwargs['meta'] = kwargs.get('meta') or {}
            kwargs['meta'].setdefault('csrf', csrf_enabled)
        super().__init__(*args, **kwargs)
        # Populated by `process_ajax` when an AJAX sub-request is handled.
        self.ajax_response = None

    def process_ajax(self):
        """
        Check if the current request is an AJAX request related to a
        field in this form and execute the field's AJAX logic.

        The response is available in the `ajax_response` attribute
        afterwards.

        :return: Whether an AJAX response was processed.
        """
        field_id = request.args.get('__wtf_ajax')
        if not field_id:
            return False
        field = next((f for f in self._fields.values() if f.id == field_id and isinstance(f, AjaxFieldMixin)), None)
        if not field:
            return False
        rv = field.process_ajax()
        self.ajax_response = rv
        return True

    def validate(self):
        valid = super().validate()
        if not valid:
            return False
        # Any signal receiver returning a falsy value vetoes the form.
        if not all(values_from_signal(signals.core.form_validated.send(self), single_value=True)):
            return False
        self.post_validate()
        return True

    def post_validate(self):
        """Called after the form was successfully validated.

        This method is a good place e.g. to override the data of fields in
        certain cases without going through the hassle of generated_data.
        """

    def populate_obj(self, obj, fields=None, skip=None, existing_only=False):
        """Populate the given object with form data.

        If `fields` is set, only fields from that list are populated.
        If `skip` is set, fields in that list are skipped.
        If `existing_only` is True, only attributes that already exist on `obj` are populated.

        Attributes starting with ``ext__`` are always skipped as they
        are from plugin-defined fields which should always be handled
        separately.
        """
        def _included(field_name):
            # Combined allow/deny filter used for both loops below.
            if fields and field_name not in fields:
                return False
            if skip and field_name in skip:
                return False
            if existing_only and not hasattr(obj, field_name):
                return False
            if field_name.startswith('ext__'):
                return False
            return True

        # Populate data from actual fields
        for name, field in self._fields.items():
            if not _included(name):
                continue
            field.populate_obj(obj, name)

        # Populate generated data
        for name, value in self.generated_data.items():
            if not _included(name):
                continue
            setattr(obj, name, value)

    @property
    def visible_fields(self):
        """A list containing all fields that are not hidden."""
        return [field for field in self if not isinstance(field.widget, HiddenInput)]

    @property
    def error_list(self):
        """A list containing all errors, prefixed with the field's label."""
        all_errors = []
        for field_name, errors in self.errors.items():
            for error in errors:
                if isinstance(error, dict) and isinstance(self[field_name], FieldList):
                    # Flatten nested errors coming from FieldList entries.
                    for field in self[field_name].entries:
                        all_errors += [f'{self[field_name].label.text}: {sub_error}'
                                       for sub_error in field.form.error_list]
                else:
                    all_errors.append(f'{self[field_name].label.text}: {error}')
        return all_errors

    @property
    def generated_data(self):
        """Return a dict containing all generated data."""
        cls = type(self)
        return {field: getattr(self, field).data
                for field in dir(cls)
                if isinstance(getattr(cls, field), generated_data)}

    @property
    def data(self):
        """Extend form.data with generated data from properties."""
        # Strip the CSRF token and plugin (``ext__``) fields from the dict.
        data = {k: v
                for k, v in super(IndicoForm, self).data.items()
                if k != self.meta.csrf_field_name and not k.startswith('ext__')}
        data.update(self.generated_data)
        return data
class FormDefaults:
    """Wrapper providing default values for ``Form(obj=...)``.

    Values may come from keyword arguments, from attributes (or mapping
    keys) of *obj*, or be assigned directly on the instance afterwards.

    :param obj: The object (or mapping) to read values from
    :param attrs: If given, only these attributes may be read from *obj*
    :param skip_attrs: Attributes which are never read from *obj*
    :param defaults: Fallback values used when *obj* does not provide one
    """

    def __init__(self, obj=None, attrs=None, skip_attrs=None, **defaults):
        self.__obj = obj
        self.__use_items = isinstance(obj, Mapping)
        self.__obj_attrs = attrs
        self.__obj_attrs_skip = skip_attrs
        self.__defaults = defaults

    def __valid_attr(self, name):
        """Check whether *name* may be read from the wrapped object."""
        if self.__obj is None:
            return False
        allowed = self.__obj_attrs
        if allowed is not None and name not in allowed:
            return False
        blocked = self.__obj_attrs_skip
        return not (blocked is not None and name in blocked)

    def __setitem__(self, key, value):
        self.__defaults[key] = value

    def __setattr__(self, key, value):
        # Name-mangled (private) attributes are stored normally; everything
        # else goes into the defaults dict.
        if key.startswith(f'_{type(self).__name__}__'):
            object.__setattr__(self, key, value)
        else:
            self.__defaults[key] = value

    def __getattr__(self, item):
        defaults = self.__defaults
        if self.__valid_attr(item):
            if self.__use_items:
                return self.__obj.get(item, defaults.get(item))
            return getattr(self.__obj, item, defaults.get(item))
        if item in defaults:
            return defaults[item]
        raise AttributeError(item)

    def __contains__(self, item):
        return hasattr(self, item)
class SyncedInputsMixin:
    """Mixin for a form having inputs using the ``SyncedInputWidget``.

    This mixin will process the synced fields, adding them the necessary
    attributes for them to render and work properly. The fields which
    are synced are defined by ``multipass.synced_fields``.

    :param synced_fields: set -- a subset of ``multipass.synced_fields``
                          which corresponds to the fields currently
                          being synchronized for the user.
    :param synced_values: dict -- a map of all the synced fields (as
                          defined by ``multipass.synced_fields``) and
                          the values they would have if they were synced
                          (regardless of whether it is or not). Fields
                          not present in this dict do not show the sync
                          button at all.
    """

    def __init__(self, *args, **kwargs):
        synced_fields = kwargs.pop('synced_fields', set())
        synced_values = kwargs.pop('synced_values', {})
        super().__init__(*args, **kwargs)
        self.syncable_fields = set(synced_values)
        # Presumably names must not be overwritten with empty synced
        # values, so drop them from the syncable set -- confirm intent.
        for key in ('first_name', 'last_name'):
            if not synced_values.get(key):
                synced_values.pop(key, None)
                self.syncable_fields.discard(key)
        if self.is_submitted():
            synced_fields = self.synced_fields
        provider = multipass.sync_provider
        provider_name = provider.title if provider is not None else 'unknown identity provider'
        # Annotate each syncable field with the state the widget needs.
        for field in multipass.synced_fields:
            self[field].synced = self[field].short_name in synced_fields
            self[field].synced_value = synced_values.get(field)
            self[field].provider_name = provider_name

    @property
    def synced_fields(self):
        """The fields which are set as synced for the current request."""
        return set(request.form.getlist('synced_fields')) & self.syncable_fields
class AjaxFieldMixin:
    """Mixin for a Field to be able to handle AJAX requests.

    This mixin will allow you to handle AJAX requests during regular
    form processing, e.g. when you have a field that needs an AJAX
    callback to perform search operations.

    To use this mixin, the controllers processing the form must
    include the following code::

        if form.process_ajax():
            return form.ajax_response

    It is a good idea to run this code as early as possible to avoid
    doing expensive operations like loading a big list of objects
    which may be never used when returning early due to the AJAX
    request.
    """

    def process_ajax(self):
        # Subclasses implement the actual AJAX handling.
        raise NotImplementedError

    def get_ajax_url(self, **url_args):
        # Merge the current request's view args and query args, then tag the
        # URL so `IndicoForm.process_ajax` can route it back to this field.
        kwargs = request.view_args | request.args.to_dict(False)
        kwargs.update(url_args)
        kwargs['__wtf_ajax'] = self.id
        return url_for(request.endpoint, **kwargs)
|
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
# Build the JSON-RPC proxy: anonymous when no password is configured,
# otherwise embed the credentials in the URL (daemon listens on port 8638).
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:8638")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8638")
# first command-line argument selects the RPC command (case-insensitive)
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a applebycoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a applebycoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
# -*- coding: utf-8 -*-
import copy
# Connection settings for the two test MongoDB databases.
MONGO_HOST = 'localhost'
MONGO_PORT = 27017
MONGO_USERNAME = MONGO1_USERNAME = 'test_user'
MONGO_PASSWORD = MONGO1_PASSWORD = 'test_pw'
MONGO_DBNAME, MONGO1_DBNAME = 'eve_test', 'eve_test1'
ID_FIELD = '_id'
# HTTP methods enabled globally at the resource and item endpoint levels
# (individual resources below may override these).
RESOURCE_METHODS = ['GET', 'POST', 'DELETE']
ITEM_METHODS = ['GET', 'PATCH', 'DELETE', 'PUT']
ITEM_CACHE_CONTROL = ''
ITEM_LOOKUP = True
ITEM_LOOKUP_FIELD = ID_FIELD
# Resource used to test that bulk inserts can be disabled per resource.
disabled_bulk = {
    'url': 'somebulkurl',
    'item_title': 'bulkdisabled',
    'bulk_enabled': False,
    'schema': {
        'string_field': {
            'type': 'string'
        }
    }
}
# Main test resource: exercises most schema features (nested dicts/lists,
# data relations, defaults, read-only fields, key/value schemas, etc.).
# Its datasource filter excludes documents that define a username - those
# are served by the derived 'users' resource instead.
contacts = {
    'url': 'arbitraryurl',
    'cache_control': 'max-age=20,must-revalidate',
    'cache_expires': 20,
    'item_title': 'contact',
    'additional_lookup': {
        # raw string: a plain literal would contain the invalid escape '\w'
        # (SyntaxWarning on modern Python); the value itself is unchanged
        'url': r'regex("[\w]+")',  # to be unique field
        'field': 'ref'
    },
    'datasource': {'filter': {'username': {'$exists': False}}},
    'schema': {
        'ref': {
            'type': 'string',
            'minlength': 25,
            'maxlength': 25,
            'required': True,
            'unique': True,
        },
        'media': {
            'type': 'media'
        },
        'prog': {
            'type': 'integer'
        },
        'role': {
            'type': 'list',
            'allowed': ["agent", "client", "vendor"],
        },
        'rows': {
            'type': 'list',
            'schema': {
                'type': 'dict',
                'schema': {
                    'sku': {'type': 'string', 'maxlength': 10},
                    'price': {'type': 'integer'},
                },
            },
        },
        'alist': {
            'type': 'list',
            'items': [{'type': 'string'}, {'type': 'integer'}, ]
        },
        'location': {
            'type': 'dict',
            'schema': {
                'address': {'type': 'string'},
                'city': {'type': 'string', 'required': True}
            },
        },
        'born': {
            'type': 'datetime',
        },
        'tid': {
            'type': 'objectid',
            'nullable': True
        },
        'title': {
            'type': 'string',
            'default': 'Mr.',
        },
        'id_list': {
            'type': 'list',
            'schema': {'type': 'objectid'}
        },
        'id_list_of_dict': {
            'type': 'list',
            'schema': {'type': 'dict', 'schema': {'id': {'type': 'objectid'}}}
        },
        'id_list_fixed_len': {
            'type': 'list',
            'items': [{'type': 'objectid'}]
        },
        'dependency_field1': {
            'type': 'string',
            'default': 'default'
        },
        'dependency_field2': {
            'type': 'string',
            'dependencies': ['dependency_field1']
        },
        'dependency_field3': {
            'type': 'string',
            'dependencies': {'dependency_field1': 'value'}
        },
        'read_only_field': {
            'type': 'string',
            'default': 'default',
            'readonly': True
        },
        'dict_with_read_only': {
            'type': 'dict',
            'schema': {
                'read_only_in_dict': {
                    'type': 'string',
                    'default': 'default',
                    'readonly': True
                }
            }
        },
        'key1': {
            'type': 'string',
        },
        'propertyschema_dict': {
            'type': 'dict',
            'propertyschema': {'type': 'string', 'regex': '[a-z]+'}
        },
        'valueschema_dict': {
            'type': 'dict',
            'valueschema': {'type': 'integer'}
        },
        'aninteger': {
            'type': 'integer',
        },
        'afloat': {
            'type': 'float',
        },
        'anumber': {
            'type': 'number'
        },
        'dict_valueschema': {
            'type': 'dict',
            'valueschema': {
                'type': 'dict',
                'schema': {
                    'challenge': {'type': 'objectid'}
                }
            }
        }
    }
}
# 'users' is a filtered view over the same 'contacts' collection: only
# documents that define a username, exposed with a restricted projection.
users = copy.deepcopy(contacts)
users['url'] = 'users'
users['datasource'] = {'source': 'contacts',
                       'filter': {'username': {'$exists': True}},
                       'projection': {'username': 1, 'ref': 1}}
users['schema']['username'] = {'type': 'string', 'required': True}
users['resource_methods'] = ['DELETE', 'POST', 'GET']
users['item_title'] = 'user'
users['additional_lookup']['field'] = 'username'
# Resource with objectid data relations pointing at 'contacts'; base for
# several derived test resources below.
invoices = {
    'schema': {
        'inv_number': {'type': 'string'},
        'person': {
            'type': 'objectid',
            'data_relation': {'resource': 'contacts'}
        },
        'invoicing_contacts': {
            'type': 'list',
            'data_relation': {'resource': 'contacts'}
        }
    }
}
# This resource is used to test app initialization when using resource
# level versioning
versioned_invoices = copy.deepcopy(invoices)
versioned_invoices['versioning'] = True
# This resource is used to test subresources that have a reference/objectid
# field that is set to be required.
required_invoices = copy.deepcopy(invoices)
required_invoices['schema']['person']['required'] = True
# Deeply nested schema (list of dicts of lists) with data relations, plus a
# self-referencing relation ('holding' points back at 'companies').
companies = {
    'item_title': 'company',
    'schema': {
        'departments': {
            'type': 'list',
            'schema': {
                'type': 'dict',
                'schema': {
                    'title': {'type': 'string'},
                    'members': {
                        'type': 'list',
                        'schema': {
                            'type': 'objectid',
                            'data_relation': {'resource': 'contacts'},
                        }
                    }
                }
            }
        },
        'holding': {
            'type': 'objectid',
            'data_relation': {'resource': 'companies'},
        }
    }
}
# Same as 'users' but mounted at a multi-segment URL.
users_overseas = copy.deepcopy(users)
users_overseas['url'] = 'users/overseas'
users_overseas['datasource'] = {'source': 'contacts'}
# Read-only resource (no schema on purpose).
payments = {
    'resource_methods': ['GET'],
    'item_methods': ['GET'],
}
# Various derived test resources sharing the 'contacts'/'invoices' collections.
empty = copy.deepcopy(invoices)
# Resource used to test user-restricted access to a shared collection.
user_restricted_access = copy.deepcopy(contacts)
user_restricted_access['url'] = 'restricted'
user_restricted_access['datasource'] = {'source': 'contacts'}
# Subresource: invoices of one person, addressed via a URL regex capture.
users_invoices = copy.deepcopy(invoices)
users_invoices['url'] = 'users/<regex("[a-f0-9]{24}"):person>/invoices'
users_invoices['datasource'] = {'source': 'invoices'}
users_required_invoices = copy.deepcopy(required_invoices)
users_required_invoices['url'] =\
    'users/<regex("[a-f0-9]{24}"):person>/required_invoices'
users_required_invoices['datasource'] = {'source': 'required_invoices'}
users_searches = copy.deepcopy(invoices)
users_searches['datasource'] = {'source': 'invoices'}
users_searches['url'] = \
    'users/<regex("[a-zA-Z0-9:\\-\\.]+"):person>/saved_searches'
# Internal resource: not exposed over HTTP, only usable from app code.
internal_transactions = {
    'resource_methods': ['GET'],
    'item_methods': ['GET'],
    'internal_resource': True
}
# Resource used to test querying objectids as plain strings.
ids = {
    'query_objectid_as_string': True,
    'item_lookup_field': 'id',
    'resource_methods': ['POST', 'GET'],
    'schema': {
        'id': {'type': 'string'},
        'name': {'type': 'string'}
    }
}
# Resource whose projection hides the password field from responses.
login = {
    'item_title': 'login',
    'url': 'login',
    'datasource': {
        'projection': {
            'password': 0
        }
    },
    'schema': {
        'email': {
            'type': 'string',
            'required': True,
            'unique': True
        },
        'password': {
            'type': 'string',
            'required': True
        }
    }
}
# This resource is used to test resource-specific id fields.
products = {
    'id_field': 'sku',
    'item_lookup_field': 'sku',
    'item_url': 'regex("[A-Z]+")',
    'schema': {
        'sku': {
            'type': 'string',
            'maxlength': 16
        },
        'title': {
            'type': 'string',
            'minlength': 4,
            'maxlength': 32
        },
        'parent_product': {
            'type': 'string',
            'data_relation': {'resource': 'products'}
        }
    }
}
# Subresource of products addressed by the parent's sku.
child_products = copy.deepcopy(products)
child_products['url'] = 'products/<regex("[A-Z]+"):parent_product>/children'
child_products['datasource'] = {'source': 'products'}
# Soft-delete-enabled view over 'contacts' that excludes the 'int' field.
exclusion = copy.deepcopy(contacts)
exclusion['url'] = 'exclusion'
exclusion['soft_delete'] = True
exclusion['datasource']['source'] = 'contacts'
exclusion['datasource']['projection'] = {'int': 0}
# Registry of every resource exposed by the test application; the key is the
# endpoint name unless a resource overrides it with its own 'url'.
DOMAIN = {
    'disabled_bulk': disabled_bulk,
    'contacts': contacts,
    'users': users,
    'users_overseas': users_overseas,
    'invoices': invoices,
    'versioned_invoices': versioned_invoices,
    'required_invoices': required_invoices,
    'payments': payments,
    'empty': empty,
    'restricted': user_restricted_access,
    'peopleinvoices': users_invoices,
    'peoplerequiredinvoices': users_required_invoices,
    'peoplesearches': users_searches,
    'companies': companies,
    'internal_transactions': internal_transactions,
    'ids': ids,
    'login': login,
    'products': products,
    'child_products': child_products,
    'exclusion': exclusion,
}
|
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Paul Nilsson, paul.nilsson@cern.ch, 2018-2019
# This module contains implementations of job monitoring tasks
import os
import time
from subprocess import PIPE
from glob import glob
from pilot.common.errorcodes import ErrorCodes
from pilot.util.auxiliary import set_pilot_state, show_memory_usage
from pilot.util.config import config
from pilot.util.container import execute
from pilot.util.filehandling import get_directory_size, remove_files, get_local_file_size, read_file
from pilot.util.loopingjob import looping_job
from pilot.util.math import convert_mb_to_b, human2bytes
from pilot.util.parameters import convert_to_int, get_maximum_input_sizes
from pilot.util.processes import get_current_cpu_consumption_time, kill_processes, get_number_of_child_processes
from pilot.util.workernode import get_local_disk_space, check_hz
import logging
logger = logging.getLogger(__name__)  # module-level logger
errors = ErrorCodes()  # shared error-code helper used by the checks in this module
def job_monitor_tasks(job, mt, args):
    """
    Perform the tasks for the job monitoring.
    The function is called once a minute. Individual checks will be performed at any desired time interval (>= 1
    minute).
    Any check returning a non-zero exit code aborts the remaining checks and
    is reported back to the caller immediately.
    :param job: job object.
    :param mt: `MonitoringTime` object.
    :param args: Pilot arguments (e.g. containing queue name, queuedata dictionary, etc).
    :return: exit code (int), diagnostics (string).
    """
    exit_code = 0
    diagnostics = ""
    current_time = int(time.time())
    # update timing info for running jobs (to avoid an update after the job has finished)
    if job.state == 'running':
        # confirm that the worker node has a proper SC_CLK_TCK (problems seen on MPPMU)
        check_hz()
        try:
            cpuconsumptiontime = get_current_cpu_consumption_time(job.pid)
        except Exception as e:
            diagnostics = "Exception caught: %s" % e
            logger.warning(diagnostics)
            exit_code = get_exception_error_code(diagnostics)
            return exit_code, diagnostics
        else:
            job.cpuconsumptiontime = int(round(cpuconsumptiontime))
            job.cpuconsumptionunit = "s"
            job.cpuconversionfactor = 1.0
            logger.info('CPU consumption time for pid=%d: %f (rounded to %d)' % (job.pid, cpuconsumptiontime, job.cpuconsumptiontime))
        # check how many cores the payload is using
        set_number_used_cores(job)
        # check memory usage (optional) for jobs in running state
        exit_code, diagnostics = verify_memory_usage(current_time, mt, job)
        if exit_code != 0:
            return exit_code, diagnostics
        # display OOM process info
        display_oom_info(job.pid)
    # should the pilot abort the payload?
    exit_code, diagnostics = should_abort_payload(current_time, mt)
    if exit_code != 0:
        return exit_code, diagnostics
    # is it time to verify the pilot running time?
    # exit_code, diagnostics = verify_pilot_running_time(current_time, mt, job)
    # if exit_code != 0:
    #     return exit_code, diagnostics
    # should the proxy be verified?
    if args.verify_proxy:
        exit_code, diagnostics = verify_user_proxy(current_time, mt)
        if exit_code != 0:
            return exit_code, diagnostics
    # is it time to check for looping jobs?
    exit_code, diagnostics = verify_looping_job(current_time, mt, job)
    if exit_code != 0:
        return exit_code, diagnostics
    # is the job using too much space?
    exit_code, diagnostics = verify_disk_usage(current_time, mt, job)
    if exit_code != 0:
        return exit_code, diagnostics
    # is it time to verify the number of running processes?
    if job.pid:
        exit_code, diagnostics = verify_running_processes(current_time, mt, job.pid)
        if exit_code != 0:
            return exit_code, diagnostics
    # make sure that any utility commands are still running
    if job.utilities != {}:
        utility_monitor(job)
    return exit_code, diagnostics
def display_oom_info(payload_pid):
    """
    Log the OOM scores of the pilot and payload processes.

    :param payload_pid: payload pid (int).
    """
    if payload_pid:
        payload_score = get_score(payload_pid)
    else:
        payload_score = 'UNKNOWN'
    pilot_score = get_score(os.getpid())
    logger.info('oom_score(pilot) = %s, oom_score(payload) = %s' % (pilot_score, payload_score))
def get_score(pid):
    """
    Return the OOM score of the given process as a string.

    :param pid: process id (int).
    :return: score (string), 'UNKNOWN' if it could not be read.
    """
    try:
        raw = read_file('/proc/%d/oom_score' % pid)
    except Exception as e:
        logger.warning('caught exception reading oom_score: %s' % e)
        return 'UNKNOWN'
    score = '%s' % raw
    # strip a single trailing newline left by the /proc read
    if score.endswith('\n'):
        score = score[:-1]
    return score
def get_exception_error_code(diagnostics):
    """
    Identify a suitable error code to a given exception.

    :param diagnostics: exception diagnostics (string).
    :return: exit_code
    """
    import traceback
    logger.warning(traceback.format_exc())
    # first matching marker wins (order preserved)
    known_markers = (
        ("Resource temporarily unavailable", errors.RESOURCEUNAVAILABLE),
        ("No such file or directory", errors.STATFILEPROBLEM),
        ("No such process", errors.NOSUCHPROCESS),
    )
    for marker, code in known_markers:
        if marker in diagnostics:
            return code
    return errors.GENERALCPUCALCPROBLEM
def set_number_used_cores(job):
    """
    Ask the experiment-specific cpu module to record the payload core counts.

    The number of actual used cores is reported with job metrics (if set).

    :param job: job object.
    :return:
    """
    user = os.environ.get('PILOT_USER', 'generic').lower()
    cpu_module = __import__('pilot.user.%s.cpu' % user, globals(), locals(), [user], 0)  # Python 2/3
    cpu_module.set_core_counts(job)
def verify_memory_usage(current_time, mt, job):
    """
    Verify the memory usage (optional).
    Note: this function relies on a stand-alone memory monitor tool that may be executed by the Pilot.
    A failure to parse the memory monitor output is deliberately ignored
    (logged only) and never fails the job.
    :param current_time: current time at the start of the monitoring loop (int).
    :param mt: measured time object.
    :param job: job object.
    :return: exit code (int), error diagnostics (string).
    """
    show_memory_usage()
    # delegate the actual check to the experiment-specific memory module
    pilot_user = os.environ.get('PILOT_USER', 'generic').lower()
    memory = __import__('pilot.user.%s.memory' % pilot_user, globals(), locals(), [pilot_user], 0)  # Python 2/3
    if not memory.allow_memory_usage_verifications():
        return 0, ""
    # is it time to verify the memory usage?
    memory_verification_time = convert_to_int(config.Pilot.memory_usage_verification_time, default=60)
    if current_time - mt.get('ct_memory') > memory_verification_time:
        # is the used memory within the allowed limit?
        try:
            exit_code, diagnostics = memory.memory_usage(job)
        except Exception as e:
            logger.warning('caught exception: %s' % e)
            exit_code = -1
        if exit_code != 0:
            # deliberate best-effort behavior: do NOT propagate the failure
            # (the early return below is intentionally commented out)
            logger.warning('ignoring failure to parse memory monitor output')
            #return exit_code, diagnostics
        else:
            # update ct_memory with the current time
            mt.update('ct_memory')
    return 0, ""
def should_abort_payload(current_time, mt):
    """
    Decide whether the pilot should abort the payload.

    In the case of Raythena, the Driver monitors the time to end jobs and may
    decide that the payload should be aborted. The Actors signal this to the
    pilot by placing a 'pilot_kill_payload' file in the run directory, which
    this function looks for at a configurable interval.

    :param current_time: current time at the start of the monitoring loop (int).
    :param mt: measured time object.
    :return: exit code (int), error diagnostics (string).
    """
    # not yet time to look for the kill instruction file?
    kill_check_interval = convert_to_int(config.Pilot.kill_instruction_time, default=600)
    if current_time - mt.get('ct_kill') <= kill_check_interval:
        return 0, ""
    kill_file = os.path.join(os.environ.get('PILOT_HOME'), config.Pilot.kill_instruction_filename)
    if os.path.exists(kill_file):
        logger.info('pilot encountered payload kill instruction file - will abort payload')
        return errors.KILLPAYLOAD, ""  # note, this is not an error
    return 0, ""
def verify_user_proxy(current_time, mt):
    """
    Verify the user proxy.

    This function is called by the job_monitor_tasks() function.

    :param current_time: current time at the start of the monitoring loop (int).
    :param mt: measured time object.
    :return: exit code (int), error diagnostics (string).
    """
    pilot_user = os.environ.get('PILOT_USER', 'generic').lower()
    userproxy = __import__('pilot.user.%s.proxy' % pilot_user, globals(), locals(), [pilot_user], 0)  # Python 2/3
    # skip unless the verification interval has elapsed
    proxy_verification_time = convert_to_int(config.Pilot.proxy_verification_time, default=600)
    if current_time - mt.get('ct_proxy') <= proxy_verification_time:
        return 0, ""
    # is the proxy still valid?
    exit_code, diagnostics = userproxy.verify_proxy(test=False)  # use test=True to test expired proxy
    if exit_code != 0:
        return exit_code, diagnostics
    # update the ct_proxy with the current time
    mt.update('ct_proxy')
    return 0, ""
def verify_looping_job(current_time, mt, job):
    """
    Verify that the job is not looping.
    :param current_time: current time at the start of the monitoring loop (int).
    :param mt: measured time object.
    :param job: job object.
    :return: exit code (int), error diagnostics (string).
    """
    looping_verification_time = convert_to_int(config.Pilot.looping_verification_time, default=600)
    if current_time - mt.get('ct_looping') > looping_verification_time:
        # is the job looping?
        try:
            exit_code, diagnostics = looping_job(job, mt)
        except Exception as e:
            diagnostics = 'exception caught in looping job algorithm: %s' % e
            logger.warning(diagnostics)
            # a missing module is interpreted as a broken worker node ("black hole")
            if "No module named" in diagnostics:
                exit_code = errors.BLACKHOLE
            else:
                exit_code = errors.UNKNOWNEXCEPTION
            return exit_code, diagnostics
        else:
            if exit_code != 0:
                return exit_code, diagnostics
        # update ct_looping with the current time
        mt.update('ct_looping')
    return 0, ""
def verify_disk_usage(current_time, mt, job):
    """
    Verify the disk usage.

    The function checks 1) payload stdout size, 2) local space, 3) work
    directory size, 4) output file sizes. The first failing check aborts
    the sequence and its result is returned.

    :param current_time: current time at the start of the monitoring loop (int).
    :param mt: measured time object.
    :param job: job object.
    :return: exit code (int), error diagnostics (string).
    """
    interval = convert_to_int(config.Pilot.disk_space_verification_time, default=300)
    if current_time - mt.get('ct_diskspace') <= interval:
        return 0, ""
    # time to check the disk space; run the checks in order
    checks = (
        lambda: check_payload_stdout(job),
        check_local_space,
        lambda: check_work_dir(job),
        lambda: check_output_file_sizes(job),
    )
    for check in checks:
        exit_code, diagnostics = check()
        if exit_code != 0:
            return exit_code, diagnostics
    # update the ct_diskspace with the current time
    mt.update('ct_diskspace')
    return 0, ""
def verify_running_processes(current_time, mt, pid):
    """
    Verify the number of running processes.

    The function sets the environmental variable PILOT_MAXNPROC to the maximum
    number of found (child) processes corresponding to the main payload process
    id. It never reports an error (always returns exit code 0).

    :param current_time: current time at the start of the monitoring loop (int).
    :param mt: measured time object.
    :param pid: payload process id (int).
    :return: exit code (int), error diagnostics (string).
    """
    nproc_env = 0
    interval = convert_to_int(config.Pilot.process_verification_time, default=300)
    if current_time - mt.get('ct_process') <= interval:
        return 0, ""
    # time to check the number of processes
    nproc = get_number_of_child_processes(pid)
    try:
        nproc_env = int(os.environ.get('PILOT_MAXNPROC', 0))
    except Exception as e:
        logger.warning('failed to convert PILOT_MAXNPROC to int: %s' % e)
    else:
        if nproc > nproc_env:
            # remember the new maximum number of found processes
            os.environ['PILOT_MAXNPROC'] = str(nproc)
    if nproc_env > 0:
        logger.info('maximum number of monitored processes: %d' % nproc_env)
    return 0, ""
def utility_monitor(job):
    """
    Make sure that any utility commands are still running.
    In case a utility tool has crashed, this function may restart the process
    (at most 5 restarts per utility).
    The function is used by the job monitor thread.
    :param job: job object.
    :return:
    """
    pilot_user = os.environ.get('PILOT_USER', 'generic').lower()
    usercommon = __import__('pilot.user.%s.common' % pilot_user, globals(), locals(), [pilot_user], 0)  # Python 2/3
    # loop over all utilities; job.utilities maps a utility name to
    # [process handle, number of launches, command string]
    for utcmd in list(job.utilities.keys()):  # E.g. utcmd = MemoryMonitor, Python 2/3
        # make sure the subprocess is still running
        utproc = job.utilities[utcmd][0]
        if not utproc.poll() is None:
            if job.state == 'finished' or job.state == 'failed' or job.state == 'stageout':
                logger.debug('no need to restart utility command since payload has finished running')
                continue
            # if poll() returns anything but None it means that the subprocess has ended - which it
            # should not have done by itself
            utility_subprocess_launches = job.utilities[utcmd][1]
            if utility_subprocess_launches <= 5:
                logger.warning('detected crashed utility subprocess - will restart it')
                utility_command = job.utilities[utcmd][2]
                try:
                    proc1 = execute(utility_command, workdir=job.workdir, returnproc=True, usecontainer=False,
                                    stdout=PIPE, stderr=PIPE, cwd=job.workdir, queuedata=job.infosys.queuedata)
                except Exception as e:
                    logger.error('could not execute: %s' % e)
                else:
                    # store process handle in job object, and keep track on how many times the
                    # command has been launched
                    job.utilities[utcmd] = [proc1, utility_subprocess_launches + 1, utility_command]
            else:
                logger.warning('detected crashed utility subprocess - too many restarts, will not restart %s again' % utcmd)
        else:  # check the utility output (the selector option adds a substring to the output file name)
            filename = usercommon.get_utility_command_output_filename(utcmd, selector=True)
            path = os.path.join(job.workdir, filename)
            if not os.path.exists(path):
                logger.warning('file: %s does not exist' % path)
    # throttle the monitor loop (presumably to let a restarted utility start
    # producing output before the next pass) - TODO confirm intent
    time.sleep(10)
def get_local_size_limit_stdout(bytes=True):
    """
    Return a proper value for the local size limit for payload stdout (from config file).

    :param bytes: boolean (if True, convert kB to Bytes).
    :return: size limit (int).
    """
    # note: the parameter name 'bytes' shadows the builtin but is kept for
    # backward compatibility with keyword callers
    fallback_kb = 2097152
    try:
        limit = int(config.Pilot.local_size_limit_stdout)
    except Exception as e:
        limit = fallback_kb
        logger.warning('bad value in config for local_size_limit_stdout: %s (will use value: %d kB)' %
                       (e, limit))
    # the configured value is in kB; convert to B when requested
    return limit * 1024 if bytes else limit
def check_payload_stdout(job):
    """
    Check the size of the payload stdout.
    If any checked file exceeds the configured limit, the job is failed and
    its processes are killed.
    :param job: job object.
    :return: exit code (int), diagnostics (string).
    """
    exit_code = 0
    diagnostics = ""
    # get list of log files
    file_list = glob(os.path.join(job.workdir, 'log.*'))
    # is this a multi-trf job?
    n_jobs = job.jobparams.count("\n") + 1
    for _i in range(n_jobs):
        # get name of payload stdout file created by the pilot
        _stdout = config.Payload.payloadstdout
        if n_jobs > 1:
            _stdout = _stdout.replace(".txt", "_%d.txt" % (_i + 1))
        # add the primary stdout file to the fileList
        file_list.append(os.path.join(job.workdir, _stdout))
    # now loop over all files and check each individually (any large enough file will fail the job)
    # NOTE(review): the log messages below reuse _stdout, which after the loop
    # above always names the last multi-trf stdout file - confirm intent
    for filename in file_list:
        if "job.log.tgz" in filename:
            logger.info("skipping file size check of file (%s) since it is a special log file" % (filename))
            continue
        if os.path.exists(filename):
            try:
                # get file size in bytes
                fsize = os.path.getsize(filename)
            except Exception as e:
                logger.warning("could not read file size of %s: %s" % (filename, e))
            else:
                # is the file too big?
                localsizelimit_stdout = get_local_size_limit_stdout()
                if fsize > localsizelimit_stdout:
                    exit_code = errors.STDOUTTOOBIG
                    diagnostics = "Payload stdout file too big: %d B (larger than limit %d B)" % \
                                  (fsize, localsizelimit_stdout)
                    logger.warning(diagnostics)
                    # kill the job
                    set_pilot_state(job=job, state="failed")
                    job.piloterrorcodes, job.piloterrordiags = errors.add_error_code(exit_code)
                    kill_processes(job.pid)
                    # remove the payload stdout file after the log extracts have been created
                    # remove any lingering input files from the work dir
                    lfns, guids = job.get_lfns_and_guids()
                    if lfns:
                        # remove any lingering input files from the work dir
                        exit_code = remove_files(job.workdir, lfns)
                else:
                    logger.info("payload stdout (%s) within allowed size limit (%d B): %d B" % (_stdout, localsizelimit_stdout, fsize))
        else:
            logger.info("skipping file size check of payload stdout file (%s) since it has not been created yet" % _stdout)
    return exit_code, diagnostics
def check_local_space():
    """
    Check whether there is enough local disk space left to run the job.

    :return: exit code (0 if success, NOLOCALSPACE if failure), diagnostics (string).
    """
    cwd = os.getcwd()
    logger.debug('checking local space on %s' % cwd)
    spaceleft = convert_mb_to_b(get_local_disk_space(cwd))  # B (diskspace is in MB)
    free_space_limit = human2bytes(config.Pilot.free_space_limit)
    if spaceleft > free_space_limit:
        logger.info('sufficient remaining disk space (%d B)' % spaceleft)
        return 0, ""
    diagnostics = 'too little space left on local disk to run job: %d B (need > %d B)' %\
                  (spaceleft, free_space_limit)
    logger.warning(diagnostics)
    return errors.NOLOCALSPACE, diagnostics
def check_work_dir(job):
    """
    Check the size of the work directory.

    The function also updates the workdirsizes list in the job object
    (the max value is later sent with the job metrics).

    :param job: job object.
    :return: exit code (int), error diagnostics (string).
    """
    exit_code = 0
    diagnostics = ""

    # guard clause replaces the original duplicated os.path.exists(job.workdir)
    # check, whose inner else-branch ('job work dir does not exist') was dead code
    if not os.path.exists(job.workdir):
        logger.warning('skipping size check of workdir since it has not been created yet')
        return exit_code, diagnostics

    # get the limit of the workdir
    maxwdirsize = get_max_allowed_work_dir_size(job.infosys.queuedata)
    workdirsize = get_directory_size(directory=job.workdir)

    # is user dir within allowed size limit?
    if workdirsize > maxwdirsize:
        exit_code = errors.USERDIRTOOLARGE
        diagnostics = "work directory (%s) is too large: %d B (must be < %d B)" % \
                      (job.workdir, workdirsize, maxwdirsize)
        logger.fatal(diagnostics)

        # log a full directory listing to help post-mortem analysis
        cmd = 'ls -altrR %s' % job.workdir
        _ec, stdout, _stderr = execute(cmd, mute=True)
        logger.info("%s: %s", cmd + '\n', stdout)

        # kill the job
        set_pilot_state(job=job, state="failed")
        job.piloterrorcodes, job.piloterrordiags = errors.add_error_code(exit_code)
        kill_processes(job.pid)

        # remove any lingering input files from the work dir
        lfns, _guids = job.get_lfns_and_guids()
        if lfns:
            remove_files(job.workdir, lfns)
            # remeasure the size of the workdir at this point since the value is stored below
            workdirsize = get_directory_size(directory=job.workdir)
    else:
        logger.info("size of work directory %s: %d B (within %d B limit)", job.workdir, workdirsize, maxwdirsize)

    # store the measured disk space (the max value will later be sent with the job metrics)
    if workdirsize > 0:
        job.add_workdir_size(workdirsize)

    return exit_code, diagnostics
def get_max_allowed_work_dir_size(queuedata):
    """
    Return the maximum allowed size of the work directory.

    :param queuedata: job.infosys.queuedata object.
    :return: max allowed work dir size in Bytes (int).
    """
    try:
        maxwdirsize = convert_mb_to_b(get_maximum_input_sizes())  # from MB to B, e.g. 16336 MB -> 17,129,537,536 B
    except Exception as e:
        # fall back to the pilot defaults
        # bug fix: get_max_input_size() requires the queuedata argument; calling
        # it without one raised a TypeError inside this exception handler
        max_input_size = get_max_input_size(queuedata)
        # NOTE(review): assumes config.Pilot.local_size_limit_stdout is numeric (kB) — confirm config type
        maxwdirsize = max_input_size + config.Pilot.local_size_limit_stdout * 1024
        logger.info("work directory size check will use %d B as a max limit (maxinputsize [%d B] + local size limit for"
                    " stdout [%d B])", maxwdirsize, max_input_size, config.Pilot.local_size_limit_stdout * 1024)
        logger.warning('conversion caught exception: %s', e)
    else:
        # grace margin, as discussed in https://its.cern.ch/jira/browse/ATLASPANDA-482
        margin = 10.0  # percent, read later from somewhere
        maxwdirsize = int(maxwdirsize * (1 + margin / 100.0))
        logger.info("work directory size check will use %d B as a max limit (10%% grace limit added)", maxwdirsize)

    return maxwdirsize
def get_max_input_size(queuedata, megabyte=False):
    """
    Return a proper maxinputsize value.

    Uses queuedata.maxwdir when set and parseable, otherwise falls back to
    the pilot default (14336 MB).

    :param queuedata: job.infosys.queuedata object.
    :param megabyte: return result in MB instead of B (Boolean).
    :return: max input size (int).
    """
    max_input_file_sizes = 14 * 1024 * 1024 * 1024  # 14 GB, 14336 MB (pilot default)
    max_input_file_sizes_mb = 14 * 1024  # 14336 MB (pilot default)
    # single default value, replacing the four duplicated assignment branches
    default_size = max_input_file_sizes_mb if megabyte else max_input_file_sizes

    _maxinputsize = queuedata.maxwdir  # normally 14336+2000 MB
    use_default = True
    if _maxinputsize != "":
        try:
            _maxinputsize = int(_maxinputsize)  # MB
            if not megabyte:
                _maxinputsize = _maxinputsize * 1024 * 1024  # MB -> B
        except Exception as e:
            logger.warning("schedconfig.maxinputsize: %s", e)
        else:
            use_default = False

    if use_default:
        _maxinputsize = default_size

    # bug fix: the original always logged "(pilot default)", even when the
    # value actually came from queuedata.maxwdir
    suffix = ' (pilot default)' if use_default else ''
    if megabyte:
        logger.info("max input size = %d MB%s", _maxinputsize, suffix)
    else:
        logger.info("max input size = %d B%s", _maxinputsize, suffix)

    return _maxinputsize
def check_output_file_sizes(job):
    """
    Verify that all known output files are within the allowed size limits.

    :param job: job object.
    :return: exit code (int), error diagnostics (string)
    """
    exit_code = 0
    diagnostics = ""

    # check each known output file in turn
    for fspec in job.outdata:
        path = os.path.join(job.workdir, fspec.lfn)
        if not os.path.exists(path):
            logger.info('output file size check: skipping output file %s since it does not exist' % path)
            continue

        # compare the current file size against the configured maximum
        fsize = get_local_file_size(path)
        max_fsize = human2bytes(config.Pilot.maximum_output_file_size)
        if fsize and fsize < max_fsize:
            logger.info('output file %s is within allowed size limit (%d B < %d B)' % (path, fsize, max_fsize))
        else:
            exit_code = errors.OUTPUTFILETOOLARGE
            diagnostics = 'output file %s is not within allowed size limit (%d B > %d B)' % (path, fsize, max_fsize)
            logger.warning(diagnostics)

    return exit_code, diagnostics
|
|
"""Unit tests for storm_tracking_utils.py."""
import unittest
import numpy
import pandas
from gewittergefahr.gg_utils import storm_tracking_utils as tracking_utils
# absolute tolerance for floating-point comparisons
TOLERANCE = 1e-6

MIN_BUFFER_DISTANCE_METRES = 0.
MAX_BUFFER_DISTANCE_METRES = 5000.
# expected column names produced by buffer_to_column_name
BUFFER_COLUMN_NAME_INCLUSIVE = 'polygon_object_latlng_deg_buffer_5000m'
BUFFER_COLUMN_NAME_EXCLUSIVE = 'polygon_object_latlng_deg_buffer_0m_5000m'
# malformatted column name (column_name_to_buffer should return None, None)
BUFFER_COLUMN_NAME_FAKE = 'foobar'

# The following constants are used to test find_storm_objects.
ALL_STORM_ID_STRINGS = ['a', 'b', 'c', 'd', 'a', 'c', 'e', 'f', 'e']
ALL_TIMES_UNIX_SEC = numpy.array([0, 0, 0, 0, 1, 1, 1, 1, 2], dtype=int)

# every (ID, time) pair below exists in ALL_STORM_ID_STRINGS/ALL_TIMES_UNIX_SEC
KEPT_ID_STRINGS_0MISSING = ['a', 'c', 'a', 'e', 'e', 'e']
KEPT_TIMES_UNIX_SEC_0MISSING = numpy.array([0, 0, 1, 1, 2, 1], dtype=int)
RELEVANT_INDICES_0MISSING = numpy.array([0, 2, 4, 6, 8, 6], dtype=int)

# the last (ID, time) pair ('a', 2) is missing; expected index is -1
KEPT_ID_STRINGS_1MISSING = ['a', 'c', 'a', 'e', 'e', 'e', 'a']
KEPT_TIMES_UNIX_SEC_1MISSING = numpy.array([0, 0, 1, 1, 2, 1, 2], dtype=int)
RELEVANT_INDICES_1MISSING = numpy.array([0, 2, 4, 6, 8, 6, -1], dtype=int)

# The following constants are used to test storm_objects_to_tracks.
THESE_ID_STRINGS = [
    'foo', 'bar', 'hal', 'foo', 'bar', 'moo', 'empty', 'foo', 'moo', 'empty'
]
THESE_TIMES_UNIX_SEC = numpy.array(
    [0, 0, 0, 300, 300, 300, 300, 600, 600, 600], dtype=int
)
THESE_X_COORDS_METRES = numpy.array(
    [10, 0, 20, 11, 1, 30, numpy.nan, 12, 31, numpy.nan]
)
THESE_Y_COORDS_METRES = numpy.array(
    [100, 0, 200, 105, 5, 300, numpy.nan, 110, 305, numpy.nan]
)

# input table: one row per storm object, i.e. per (storm ID, valid time) pair
THIS_DICT = {
    tracking_utils.PRIMARY_ID_COLUMN: THESE_ID_STRINGS,
    tracking_utils.VALID_TIME_COLUMN: THESE_TIMES_UNIX_SEC,
    tracking_utils.CENTROID_LATITUDE_COLUMN: THESE_Y_COORDS_METRES,
    tracking_utils.CENTROID_LONGITUDE_COLUMN: THESE_X_COORDS_METRES,
    tracking_utils.CENTROID_X_COLUMN: THESE_X_COORDS_METRES,
    tracking_utils.CENTROID_Y_COLUMN: THESE_Y_COORDS_METRES
}
STORM_OBJECT_TABLE = pandas.DataFrame.from_dict(THIS_DICT)

# expected output table: one row per storm track (unique ID), sorted by ID
THESE_ID_STRINGS = ['bar', 'empty', 'foo', 'hal', 'moo']
THIS_DICT = {
    tracking_utils.PRIMARY_ID_COLUMN: THESE_ID_STRINGS,
    tracking_utils.TRACK_TIMES_COLUMN:
        [[0, 300], [300, 600], [0, 300, 600], [0], [300, 600]],
    tracking_utils.OBJECT_INDICES_COLUMN:
        [[1, 4], [6, 9], [0, 3, 7], [2], [5, 8]],
    tracking_utils.TRACK_X_COORDS_COLUMN:
        [[0, 1], [numpy.nan, numpy.nan], [10, 11, 12], [20], [30, 31]],
    tracking_utils.TRACK_Y_COORDS_COLUMN:
        [[0, 5], [numpy.nan, numpy.nan], [100, 105, 110], [200], [300, 305]]
}
STORM_TRACK_TABLE = pandas.DataFrame.from_dict(THIS_DICT)
# in this test setup, lat/long simply mirror the y/x coordinates
STORM_TRACK_TABLE[tracking_utils.TRACK_LATITUDES_COLUMN] = STORM_TRACK_TABLE[
    tracking_utils.TRACK_Y_COORDS_COLUMN]
STORM_TRACK_TABLE[tracking_utils.TRACK_LONGITUDES_COLUMN] = STORM_TRACK_TABLE[
    tracking_utils.TRACK_X_COORDS_COLUMN]
def _compare_storm_track_tables(
        first_storm_track_table, second_storm_track_table):
    """Compares two tables with storm tracks.

    :param first_storm_track_table: First table (pandas DataFrame).
    :param second_storm_track_table: Second table.
    :return: are_tables_equal: Boolean flag.
    """
    first_column_names = list(first_storm_track_table)
    second_column_names = list(second_storm_track_table)
    if set(first_column_names) != set(second_column_names):
        return False

    first_num_tracks = len(first_storm_track_table.index)
    # bug fix: the original measured the FIRST table twice, so tables with
    # different numbers of tracks were never rejected here
    second_num_tracks = len(second_storm_track_table.index)
    if first_num_tracks != second_num_tracks:
        return False

    for this_column in first_column_names:
        if this_column == tracking_utils.PRIMARY_ID_COLUMN:
            # string IDs: exact element-wise equality
            if not numpy.array_equal(
                    first_storm_track_table[this_column].values,
                    second_storm_track_table[this_column].values):
                return False
        else:
            # numeric per-track arrays: compare with tolerance, treating
            # NaN == NaN as equal
            for i in range(first_num_tracks):
                if not numpy.allclose(
                        first_storm_track_table[this_column].values[i],
                        second_storm_track_table[this_column].values[i],
                        atol=TOLERANCE, equal_nan=True):
                    return False

    return True
class StormTrackingUtilsTests(unittest.TestCase):
    """Unit tests for the public methods of storm_tracking_utils."""

    def test_column_name_to_buffer_inclusive(self):
        """column_name_to_buffer on a buffer that includes the storm object."""

        min_distance_metres, max_distance_metres = (
            tracking_utils.column_name_to_buffer(BUFFER_COLUMN_NAME_INCLUSIVE)
        )

        self.assertTrue(numpy.isnan(min_distance_metres))
        self.assertTrue(numpy.isclose(
            max_distance_metres, MAX_BUFFER_DISTANCE_METRES, atol=TOLERANCE
        ))

    def test_column_name_to_buffer_exclusive(self):
        """column_name_to_buffer on a buffer that excludes the storm object."""

        min_distance_metres, max_distance_metres = (
            tracking_utils.column_name_to_buffer(BUFFER_COLUMN_NAME_EXCLUSIVE)
        )

        self.assertTrue(numpy.isclose(
            min_distance_metres, MIN_BUFFER_DISTANCE_METRES, atol=TOLERANCE
        ))
        self.assertTrue(numpy.isclose(
            max_distance_metres, MAX_BUFFER_DISTANCE_METRES, atol=TOLERANCE
        ))

    def test_column_name_to_buffer_fake(self):
        """column_name_to_buffer on a malformatted column name."""

        min_distance_metres, max_distance_metres = (
            tracking_utils.column_name_to_buffer(BUFFER_COLUMN_NAME_FAKE)
        )

        self.assertIsNone(min_distance_metres)
        self.assertIsNone(max_distance_metres)

    def test_buffer_to_column_name_inclusive(self):
        """buffer_to_column_name for a buffer that includes the storm object."""

        column_name = tracking_utils.buffer_to_column_name(
            min_distance_metres=numpy.nan,
            max_distance_metres=MAX_BUFFER_DISTANCE_METRES)

        self.assertEqual(column_name, BUFFER_COLUMN_NAME_INCLUSIVE)

    def test_buffer_to_column_name_exclusive(self):
        """buffer_to_column_name for a buffer that excludes the storm object."""

        column_name = tracking_utils.buffer_to_column_name(
            min_distance_metres=MIN_BUFFER_DISTANCE_METRES,
            max_distance_metres=MAX_BUFFER_DISTANCE_METRES)

        self.assertEqual(column_name, BUFFER_COLUMN_NAME_EXCLUSIVE)

    def test_find_storm_objects_0missing(self):
        """find_storm_objects when every desired storm object exists."""

        found_indices = tracking_utils.find_storm_objects(
            all_id_strings=ALL_STORM_ID_STRINGS,
            all_times_unix_sec=ALL_TIMES_UNIX_SEC,
            id_strings_to_keep=KEPT_ID_STRINGS_0MISSING,
            times_to_keep_unix_sec=KEPT_TIMES_UNIX_SEC_0MISSING,
            allow_missing=False)

        self.assertTrue(numpy.array_equal(
            found_indices, RELEVANT_INDICES_0MISSING
        ))

    def test_find_storm_objects_allow_missing_false(self):
        """find_storm_objects raises when an object is missing and allow_missing=False."""

        with self.assertRaises(ValueError):
            tracking_utils.find_storm_objects(
                all_id_strings=ALL_STORM_ID_STRINGS,
                all_times_unix_sec=ALL_TIMES_UNIX_SEC,
                id_strings_to_keep=KEPT_ID_STRINGS_1MISSING,
                times_to_keep_unix_sec=KEPT_TIMES_UNIX_SEC_1MISSING,
                allow_missing=False)

    def test_find_storm_objects_allow_missing_true(self):
        """find_storm_objects returns -1 for a missing object when allow_missing=True."""

        found_indices = tracking_utils.find_storm_objects(
            all_id_strings=ALL_STORM_ID_STRINGS,
            all_times_unix_sec=ALL_TIMES_UNIX_SEC,
            id_strings_to_keep=KEPT_ID_STRINGS_1MISSING,
            times_to_keep_unix_sec=KEPT_TIMES_UNIX_SEC_1MISSING,
            allow_missing=True)

        self.assertTrue(numpy.array_equal(
            found_indices, RELEVANT_INDICES_1MISSING
        ))

    def test_storm_objects_to_tracks(self):
        """storm_objects_to_tracks reproduces the expected track table."""

        actual_track_table = tracking_utils.storm_objects_to_tracks(
            STORM_OBJECT_TABLE)
        actual_track_table.sort_values(
            tracking_utils.PRIMARY_ID_COLUMN, axis=0, ascending=True,
            inplace=True)

        self.assertTrue(_compare_storm_track_tables(
            actual_track_table, STORM_TRACK_TABLE
        ))
# run all unit tests when executed as a script
if __name__ == '__main__':
    unittest.main()
|
|
# Copyright 2015, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Control the flow of optimizations applied to node tree.
Applies abstract execution on all so far known modules until no more
optimization is possible. Every successful optimization to anything might
make others possible.
"""
from logging import debug
from nuitka import ModuleRegistry, Options, VariableRegistry
from nuitka.optimizations import TraceCollections
from nuitka.plugins.PluginBase import Plugins
from nuitka.Tracing import printLine
from nuitka.utils import Utils
from .Tags import TagSet
# Cache the show-progress option once; consulted before each progress printout.
_progress = Options.isShowProgress()
def _attemptRecursion(module):
new_modules = module.attemptRecursion()
for new_module in new_modules:
debug(
"{source_ref} : {tags} : {message}".format(
source_ref = new_module.getSourceReference().getAsString(),
tags = "new_code",
message = "Recursed to module package."
)
)
# Global set of change tags collected during the current optimization pass.
# (Re)initialized by optimizePythonModule / optimizeShlibModule below.
tag_set = None

def signalChange(tags, source_ref, message):
    """ Indicate a change to the optimization framework.

    :param tags: change tag string(s), recorded in the global tag set
    :param source_ref: source reference of the changed node (used for logging)
    :param message: human readable description, or None to skip logging
    """

    if message is not None:
        debug(
            "{source_ref} : {tags} : {message}".format(
                source_ref = source_ref.getAsString(),
                tags       = tags,
                message    = message
            )
        )

    # Record the tags so the enclosing optimization loop knows to iterate again.
    tag_set.onSignal(tags)

# Use this globally from there, without cyclic dependency.
TraceCollections.signalChange = signalChange
def optimizePythonModule(module):
    """ Optimize a compiled Python module until no more changes occur.

    Returns True when anything changed, or when the module still has unclear
    locals, so the caller knows another global pass is needed.
    """

    if _progress:
        printLine(
            "Doing module local optimizations for '{module_name}'.".format(
                module_name = module.getFullName()
            )
        )

    # The tag set is global, so it can react to changes without context.
    # pylint: disable=W0603
    global tag_set
    tag_set = TagSet()

    memory_watch = Utils.MemoryWatch() if _progress else None

    touched = False
    while True:
        tag_set.clear()

        module.computeModule()

        if tag_set:
            touched = True
        else:
            break

    if memory_watch is not None:
        memory_watch.finish()

        printLine(
            "Memory usage changed during optimization of '%s': %s" % (
                module.getFullName(),
                memory_watch.asStr()
            )
        )

    return touched or module.hasUnclearLocals()
def optimizeShlibModule(module):
    """ Handle a shared-library module: no Python code to optimize, only
        recursion to the parent package and plugin-provided implicit imports.
    """

    # Pick up parent package if any.
    _attemptRecursion(module)

    # The tag set is global, so it can react to changes without context.
    # pylint: disable=W0603
    global tag_set
    tag_set = TagSet()

    # Let plugins register implicit imports of this shared library.
    Plugins.considerImplicitImports(module, signal_change = signalChange)
def areEmptyTraces(variable_traces):
    """ Decide if the given variable traces amount to no real usage.

    Returns True when the variable can be considered unused: no trace shows
    an assignment, initialization, or definite usage. The branch order below
    is significant — the trace kinds are checked from strongest to weakest.
    """

    empty = True

    for variable_trace in variable_traces:
        if variable_trace.isAssignTrace():
            empty = False
            break
        elif variable_trace.isInitTrace():
            empty = False
            break
        elif variable_trace.isUninitTrace():
            if variable_trace.getPrevious():
                # A "del" statement can do this, and needs to prevent variable
                # from being removed.

                empty = False
                break
            elif variable_trace.hasDefiniteUsages():
                # Checking definite is enough, the merges, we shall see
                # them as well.

                empty = False
                break
        elif variable_trace.isUnknownTrace():
            if variable_trace.hasDefiniteUsages():
                # Checking definite is enough, the merges, we shall see
                # them as well.

                empty = False
                break
        elif variable_trace.isMergeTrace():
            if variable_trace.hasDefiniteUsages():
                # Checking definite is enough, the merges, we shall see
                # them as well.

                empty = False
                break
        elif variable_trace.isEscaped():
            # NOTE(review): the assert makes the code below unreachable except
            # under "-O"; presumably escaped traces are not expected here — confirm.
            assert False, variable_trace

            # If the value is escape, we still need to keep it for that
            # escape opportunity. This is only while that is not seen
            # as a definite usage.

            empty = False
            break
        else:
            # Unknown trace kind: fail loudly rather than silently mis-classify.
            assert False, variable_trace

    return empty
def areReadOnlyTraces(variable_traces):
    """ Check if the given traces contain no writes (assignment or init). """

    # Read-only unless any trace shows the variable being written to.
    return not any(
        variable_trace.isAssignTrace() or variable_trace.isInitTrace()
        for variable_trace in variable_traces
    )
def optimizeUnusedClosureVariables(function_body):
    """ Remove unused closure variables, and demote read-only ones to locals. """

    for closure_variable in function_body.getClosureVariables():
        traces = function_body.constraint_collection.getVariableTraces(
            variable = closure_variable
        )

        if areEmptyTraces(traces):
            # No usage at all: drop the closure variable entirely.
            signalChange(
                "var_usage",
                function_body.getSourceReference(),
                message = "Remove unused closure variable."
            )

            function_body.removeClosureVariable(closure_variable)
            continue

        if not areReadOnlyTraces(traces):
            continue

        # Read-only here; it may become a plain local variable if nothing
        # outside of this function writes to it either.
        global_trace = VariableRegistry.getGlobalVariableTrace(closure_variable)

        if global_trace is not None and not global_trace.hasWritesOutsideOf(function_body):
            function_body.demoteClosureVariable(closure_variable)

            signalChange(
                "var_usage",
                function_body.getSourceReference(),
                message = "Turn read-only usage of unassigned closure variable to local variable."
            )
def optimizeUnusedUserVariables(function_body):
    """ Drop user (local) variables whose traces show no usage at all. """

    collection = function_body.constraint_collection

    for user_variable in function_body.getUserLocalVariables():
        traces = collection.getVariableTraces(variable = user_variable)

        if areEmptyTraces(traces):
            function_body.removeUserVariable(user_variable)
def optimizeUnusedTempVariables(provider):
    """ Drop temporary variables of the provider that are never used. """

    collection = provider.constraint_collection

    for temp_variable in provider.getTempVariables():
        traces = collection.getVariableTraces(variable = temp_variable)

        if areEmptyTraces(traces):
            provider.removeTempVariable(temp_variable)
def optimizeVariables(module):
    """ Run variable usage optimizations on a module and its used functions. """

    for function_body in module.getUsedFunctions():
        # Skip functions whose locals are unclear; they cannot be changed safely.
        if function_body.constraint_collection.unclear_locals:
            continue

        optimizeUnusedUserVariables(function_body)
        optimizeUnusedClosureVariables(function_body)
        optimizeUnusedTempVariables(function_body)

    # The module itself has temporary variables as well.
    optimizeUnusedTempVariables(module)
def optimize():
    """ Fixed-point driver: keep optimizing all known modules until no pass
        reports a change. Each pass traverses the module registry, optimizes
        every module, releases traces of unused functions, then runs variable
        optimizations once the global variable registry is complete.
    """
    # This is somewhat complex with many cases, pylint: disable=R0912

    while True:
        finished = True

        ModuleRegistry.startTraversal()

        while True:
            current_module = ModuleRegistry.nextModule()

            if current_module is None:
                break

            if _progress:
                printLine(
                    """\
Optimizing module '{module_name}', {remaining:d} more modules to go \
after that. Memory usage {memory}:""".format(
                        module_name = current_module.getFullName(),
                        remaining = ModuleRegistry.remainingCount(),
                        memory = Utils.getHumanReadableProcessMemoryUsage()
                    )
                )

            if current_module.isPythonShlibModule():
                optimizeShlibModule(current_module)
            else:
                changed = optimizePythonModule(current_module)

                # Any change means another global pass is required.
                if changed:
                    finished = False

        # Unregister collection traces from now unused code.
        for current_module in ModuleRegistry.getDoneModules():
            if not current_module.isPythonShlibModule():
                for function in current_module.getUnusedFunctions():
                    VariableRegistry.updateFromCollection(
                        old_collection = function.constraint_collection,
                        new_collection = None
                    )

                    function.constraint_collection = None

        # The first completed pass marks the variable registry complete and
        # forces one more pass with that knowledge.
        if not VariableRegistry.complete:
            VariableRegistry.complete = True

            finished = False

        for current_module in ModuleRegistry.getDoneModules():
            if not current_module.isPythonShlibModule():
                optimizeVariables(current_module)

        if finished:
            break
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.autopilot.v1.assistant.defaults import DefaultsList
from twilio.rest.autopilot.v1.assistant.dialogue import DialogueList
from twilio.rest.autopilot.v1.assistant.field_type import FieldTypeList
from twilio.rest.autopilot.v1.assistant.model_build import ModelBuildList
from twilio.rest.autopilot.v1.assistant.query import QueryList
from twilio.rest.autopilot.v1.assistant.style_sheet import StyleSheetList
from twilio.rest.autopilot.v1.assistant.task import TaskList
from twilio.rest.autopilot.v1.assistant.webhook import WebhookList
class AssistantList(ListResource):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """

    def __init__(self, version):
        """
        Initialize the AssistantList.

        :param Version version: Version that contains the resource

        :rtype: twilio.rest.autopilot.v1.assistant.AssistantList
        """
        super(AssistantList, self).__init__(version)

        # Path Solution: the list resource has no path parameters.
        self._solution = {}
        self._uri = '/Assistants'.format(**self._solution)

    def stream(self, limit=None, page_size=None):
        """
        Lazily stream AssistantInstance records from the API as a generator.

        Records are fetched page by page, so memory use stays bounded even
        for large result sets.

        :param int limit: Upper limit for the number of records to return. stream()
                          guarantees to never return more than limit.  Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records.  If no page_size is defined
                              but a limit is defined, stream() will attempt to read the
                              limit with the most efficient page size, i.e. min(limit, 1000)

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.autopilot.v1.assistant.AssistantInstance]
        """
        limits = self._version.read_limits(limit, page_size)

        first_page = self.page(page_size=limits['page_size'])

        return self._version.stream(first_page, limits['limit'], limits['page_limit'])

    def list(self, limit=None, page_size=None):
        """
        Eagerly fetch AssistantInstance records from the API as a list.

        Unlike stream(), all records (up to limit) are loaded into memory
        before returning.

        :param int limit: Upper limit for the number of records to return. list() guarantees
                          never to return more than limit.  Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records.  If no page_size is defined
                              but a limit is defined, list() will attempt to read the limit
                              with the most efficient page size, i.e. min(limit, 1000)

        :returns: list of up to limit results
        :rtype: list[twilio.rest.autopilot.v1.assistant.AssistantInstance]
        """
        return list(self.stream(limit=limit, page_size=page_size))

    def page(self, page_token=values.unset, page_number=values.unset,
             page_size=values.unset):
        """
        Retrieve a single page of AssistantInstance records from the API.
        The request is executed immediately.

        :param str page_token: PageToken provided by the API
        :param int page_number: Page Number, this value is simply for client state
        :param int page_size: Number of records to return, defaults to 50

        :returns: Page of AssistantInstance
        :rtype: twilio.rest.autopilot.v1.assistant.AssistantPage
        """
        params = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })

        response = self._version.page('GET', self._uri, params=params)

        return AssistantPage(self._version, response, self._solution)

    def get_page(self, target_url):
        """
        Retrieve a specific page of AssistantInstance records by URL.
        The request is executed immediately.

        :param str target_url: API-generated URL for the requested results page

        :returns: Page of AssistantInstance
        :rtype: twilio.rest.autopilot.v1.assistant.AssistantPage
        """
        response = self._version.domain.twilio.request('GET', target_url)

        return AssistantPage(self._version, response, self._solution)

    def create(self, friendly_name=values.unset, log_queries=values.unset,
               unique_name=values.unset, callback_url=values.unset,
               callback_events=values.unset, style_sheet=values.unset,
               defaults=values.unset):
        """
        Create a new AssistantInstance.

        :param unicode friendly_name: A string to describe the new resource
        :param bool log_queries: Whether queries should be logged and kept after training
        :param unicode unique_name: An application-defined string that uniquely identifies the new resource
        :param unicode callback_url: Reserved
        :param unicode callback_events: Reserved
        :param dict style_sheet: A JSON string that defines the Assistant's style sheet
        :param dict defaults: A JSON object that defines the Assistant's default tasks for various scenarios

        :returns: Newly created AssistantInstance
        :rtype: twilio.rest.autopilot.v1.assistant.AssistantInstance
        """
        data = values.of({
            'FriendlyName': friendly_name,
            'LogQueries': log_queries,
            'UniqueName': unique_name,
            'CallbackUrl': callback_url,
            'CallbackEvents': callback_events,
            'StyleSheet': serialize.object(style_sheet),
            'Defaults': serialize.object(defaults),
        })

        payload = self._version.create('POST', self._uri, data=data)

        return AssistantInstance(self._version, payload)

    def get(self, sid):
        """
        Construct an AssistantContext for the given resource.

        :param sid: The unique string that identifies the resource

        :rtype: twilio.rest.autopilot.v1.assistant.AssistantContext
        """
        return AssistantContext(self._version, sid=sid)

    def __call__(self, sid):
        """
        Construct an AssistantContext for the given resource.

        :param sid: The unique string that identifies the resource

        :rtype: twilio.rest.autopilot.v1.assistant.AssistantContext
        """
        return AssistantContext(self._version, sid=sid)

    def __repr__(self):
        """
        Provide a friendly representation.

        :rtype: str
        """
        return '<Twilio.Autopilot.V1.AssistantList>'
class AssistantPage(Page):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """

    def __init__(self, version, response, solution):
        """
        Initialize the AssistantPage.

        :param Version version: Version that contains the resource
        :param Response response: Response from the API

        :rtype: twilio.rest.autopilot.v1.assistant.AssistantPage
        """
        super(AssistantPage, self).__init__(version, response)

        # Path solution carried over from the list resource.
        self._solution = solution

    def get_instance(self, payload):
        """
        Build an AssistantInstance from an API payload.

        :param dict payload: Payload response from the API

        :rtype: twilio.rest.autopilot.v1.assistant.AssistantInstance
        """
        return AssistantInstance(self._version, payload)

    def __repr__(self):
        """
        Provide a friendly representation.

        :rtype: str
        """
        return '<Twilio.Autopilot.V1.AssistantPage>'
class AssistantContext(InstanceContext):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, sid):
"""
Initialize the AssistantContext
:param Version version: Version that contains the resource
:param sid: The unique string that identifies the resource
:returns: twilio.rest.autopilot.v1.assistant.AssistantContext
:rtype: twilio.rest.autopilot.v1.assistant.AssistantContext
"""
super(AssistantContext, self).__init__(version)
# Path Solution
self._solution = {'sid': sid, }
self._uri = '/Assistants/{sid}'.format(**self._solution)
# Dependents
self._field_types = None
self._tasks = None
self._model_builds = None
self._queries = None
self._style_sheet = None
self._defaults = None
self._dialogues = None
self._webhooks = None
def fetch(self):
"""
Fetch a AssistantInstance
:returns: Fetched AssistantInstance
:rtype: twilio.rest.autopilot.v1.assistant.AssistantInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return AssistantInstance(self._version, payload, sid=self._solution['sid'], )
def update(self, friendly_name=values.unset, log_queries=values.unset,
unique_name=values.unset, callback_url=values.unset,
callback_events=values.unset, style_sheet=values.unset,
defaults=values.unset, development_stage=values.unset):
"""
Update the AssistantInstance
:param unicode friendly_name: A string to describe the resource
:param bool log_queries: Whether queries should be logged and kept after training
:param unicode unique_name: An application-defined string that uniquely identifies the resource
:param unicode callback_url: Reserved
:param unicode callback_events: Reserved
:param dict style_sheet: A JSON string that defines the Assistant's style sheet
:param dict defaults: A JSON object that defines the Assistant's [default tasks](https://www.twilio.com/docs/autopilot/api/assistant/defaults) for various scenarios
:param unicode development_stage: A string describing the state of the assistant.
:returns: Updated AssistantInstance
:rtype: twilio.rest.autopilot.v1.assistant.AssistantInstance
"""
data = values.of({
'FriendlyName': friendly_name,
'LogQueries': log_queries,
'UniqueName': unique_name,
'CallbackUrl': callback_url,
'CallbackEvents': callback_events,
'StyleSheet': serialize.object(style_sheet),
'Defaults': serialize.object(defaults),
'DevelopmentStage': development_stage,
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return AssistantInstance(self._version, payload, sid=self._solution['sid'], )
def delete(self):
"""
Deletes the AssistantInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete('delete', self._uri)
@property
def field_types(self):
    """Access the field_types subresource, building it on first use.

    :returns: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeList
    :rtype: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeList
    """
    if self._field_types is None:
        sid = self._solution['sid']
        self._field_types = FieldTypeList(self._version, assistant_sid=sid)
    return self._field_types

@property
def tasks(self):
    """Access the tasks subresource, building it on first use.

    :returns: twilio.rest.autopilot.v1.assistant.task.TaskList
    :rtype: twilio.rest.autopilot.v1.assistant.task.TaskList
    """
    if self._tasks is None:
        sid = self._solution['sid']
        self._tasks = TaskList(self._version, assistant_sid=sid)
    return self._tasks

@property
def model_builds(self):
    """Access the model_builds subresource, building it on first use.

    :returns: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildList
    :rtype: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildList
    """
    if self._model_builds is None:
        sid = self._solution['sid']
        self._model_builds = ModelBuildList(self._version, assistant_sid=sid)
    return self._model_builds

@property
def queries(self):
    """Access the queries subresource, building it on first use.

    :returns: twilio.rest.autopilot.v1.assistant.query.QueryList
    :rtype: twilio.rest.autopilot.v1.assistant.query.QueryList
    """
    if self._queries is None:
        sid = self._solution['sid']
        self._queries = QueryList(self._version, assistant_sid=sid)
    return self._queries

@property
def style_sheet(self):
    """Access the style_sheet subresource, building it on first use.

    :returns: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetList
    :rtype: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetList
    """
    if self._style_sheet is None:
        sid = self._solution['sid']
        self._style_sheet = StyleSheetList(self._version, assistant_sid=sid)
    return self._style_sheet

@property
def defaults(self):
    """Access the defaults subresource, building it on first use.

    :returns: twilio.rest.autopilot.v1.assistant.defaults.DefaultsList
    :rtype: twilio.rest.autopilot.v1.assistant.defaults.DefaultsList
    """
    if self._defaults is None:
        sid = self._solution['sid']
        self._defaults = DefaultsList(self._version, assistant_sid=sid)
    return self._defaults

@property
def dialogues(self):
    """Access the dialogues subresource, building it on first use.

    :returns: twilio.rest.autopilot.v1.assistant.dialogue.DialogueList
    :rtype: twilio.rest.autopilot.v1.assistant.dialogue.DialogueList
    """
    if self._dialogues is None:
        sid = self._solution['sid']
        self._dialogues = DialogueList(self._version, assistant_sid=sid)
    return self._dialogues

@property
def webhooks(self):
    """Access the webhooks subresource, building it on first use.

    :returns: twilio.rest.autopilot.v1.assistant.webhook.WebhookList
    :rtype: twilio.rest.autopilot.v1.assistant.webhook.WebhookList
    """
    if self._webhooks is None:
        sid = self._solution['sid']
        self._webhooks = WebhookList(self._version, assistant_sid=sid)
    return self._webhooks
def __repr__(self):
    """Provide a friendly representation.

    :returns: Machine friendly representation
    :rtype: str
    """
    pairs = ['{}={}'.format(key, val) for key, val in self._solution.items()]
    return '<Twilio.Autopilot.V1.AssistantContext {}>'.format(' '.join(pairs))
class AssistantInstance(InstanceResource):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """

    def __init__(self, version, payload, sid=None):
        """
        Initialize the AssistantInstance

        :returns: twilio.rest.autopilot.v1.assistant.AssistantInstance
        :rtype: twilio.rest.autopilot.v1.assistant.AssistantInstance
        """
        super(AssistantInstance, self).__init__(version)

        # Marshaled Properties: a plain-dict snapshot of the REST payload;
        # only the two date fields are parsed (into datetimes), everything
        # else is kept exactly as returned by the API.
        self._properties = {
            'account_sid': payload.get('account_sid'),
            'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
            'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
            'friendly_name': payload.get('friendly_name'),
            'latest_model_build_sid': payload.get('latest_model_build_sid'),
            'links': payload.get('links'),
            'log_queries': payload.get('log_queries'),
            'development_stage': payload.get('development_stage'),
            'needs_model_build': payload.get('needs_model_build'),
            'sid': payload.get('sid'),
            'unique_name': payload.get('unique_name'),
            'url': payload.get('url'),
            'callback_url': payload.get('callback_url'),
            'callback_events': payload.get('callback_events'),
        }

        # Context is created lazily by _proxy; an explicit sid (e.g. from a
        # context fetch) wins over the sid embedded in the payload.
        self._context = None
        self._solution = {'sid': sid or self._properties['sid'], }

    @property
    def _proxy(self):
        """
        Generate an instance context for the instance, the context is capable of
        performing various actions. All instance actions are proxied to the context

        :returns: AssistantContext for this AssistantInstance
        :rtype: twilio.rest.autopilot.v1.assistant.AssistantContext
        """
        # Built on first use and cached for subsequent fetch/update/delete calls.
        if self._context is None:
            self._context = AssistantContext(self._version, sid=self._solution['sid'], )
        return self._context

    @property
    def account_sid(self):
        """
        :returns: The SID of the Account that created the resource
        :rtype: unicode
        """
        return self._properties['account_sid']

    @property
    def date_created(self):
        """
        :returns: The RFC 2822 date and time in GMT when the resource was created
        :rtype: datetime
        """
        return self._properties['date_created']

    @property
    def date_updated(self):
        """
        :returns: The RFC 2822 date and time in GMT when the resource was last updated
        :rtype: datetime
        """
        return self._properties['date_updated']

    @property
    def friendly_name(self):
        """
        :returns: The string that you assigned to describe the resource
        :rtype: unicode
        """
        return self._properties['friendly_name']

    @property
    def latest_model_build_sid(self):
        """
        :returns: Reserved
        :rtype: unicode
        """
        return self._properties['latest_model_build_sid']

    @property
    def links(self):
        """
        :returns: A list of the URLs of the Assistant's related resources
        :rtype: unicode
        """
        return self._properties['links']

    @property
    def log_queries(self):
        """
        :returns: Whether queries should be logged and kept after training
        :rtype: bool
        """
        return self._properties['log_queries']

    @property
    def development_stage(self):
        """
        :returns: A string describing the state of the assistant.
        :rtype: unicode
        """
        return self._properties['development_stage']

    @property
    def needs_model_build(self):
        """
        :returns: Whether model needs to be rebuilt
        :rtype: bool
        """
        return self._properties['needs_model_build']

    @property
    def sid(self):
        """
        :returns: The unique string that identifies the resource
        :rtype: unicode
        """
        return self._properties['sid']

    @property
    def unique_name(self):
        """
        :returns: An application-defined string that uniquely identifies the resource
        :rtype: unicode
        """
        return self._properties['unique_name']

    @property
    def url(self):
        """
        :returns: The absolute URL of the Assistant resource
        :rtype: unicode
        """
        return self._properties['url']

    @property
    def callback_url(self):
        """
        :returns: Reserved
        :rtype: unicode
        """
        return self._properties['callback_url']

    @property
    def callback_events(self):
        """
        :returns: Reserved
        :rtype: unicode
        """
        return self._properties['callback_events']

    def fetch(self):
        """
        Fetch a AssistantInstance

        :returns: Fetched AssistantInstance
        :rtype: twilio.rest.autopilot.v1.assistant.AssistantInstance
        """
        return self._proxy.fetch()

    def update(self, friendly_name=values.unset, log_queries=values.unset,
               unique_name=values.unset, callback_url=values.unset,
               callback_events=values.unset, style_sheet=values.unset,
               defaults=values.unset, development_stage=values.unset):
        """
        Update the AssistantInstance

        :param unicode friendly_name: A string to describe the resource
        :param bool log_queries: Whether queries should be logged and kept after training
        :param unicode unique_name: An application-defined string that uniquely identifies the resource
        :param unicode callback_url: Reserved
        :param unicode callback_events: Reserved
        :param dict style_sheet: A JSON string that defines the Assistant's style sheet
        :param dict defaults: A JSON object that defines the Assistant's [default tasks](https://www.twilio.com/docs/autopilot/api/assistant/defaults) for various scenarios
        :param unicode development_stage: A string describing the state of the assistant.

        :returns: Updated AssistantInstance
        :rtype: twilio.rest.autopilot.v1.assistant.AssistantInstance
        """
        # All mutating actions are proxied to the (lazily built) context.
        return self._proxy.update(
            friendly_name=friendly_name,
            log_queries=log_queries,
            unique_name=unique_name,
            callback_url=callback_url,
            callback_events=callback_events,
            style_sheet=style_sheet,
            defaults=defaults,
            development_stage=development_stage,
        )

    def delete(self):
        """
        Deletes the AssistantInstance

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._proxy.delete()

    @property
    def field_types(self):
        """
        Access the field_types

        :returns: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeList
        :rtype: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeList
        """
        return self._proxy.field_types

    @property
    def tasks(self):
        """
        Access the tasks

        :returns: twilio.rest.autopilot.v1.assistant.task.TaskList
        :rtype: twilio.rest.autopilot.v1.assistant.task.TaskList
        """
        return self._proxy.tasks

    @property
    def model_builds(self):
        """
        Access the model_builds

        :returns: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildList
        :rtype: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildList
        """
        return self._proxy.model_builds

    @property
    def queries(self):
        """
        Access the queries

        :returns: twilio.rest.autopilot.v1.assistant.query.QueryList
        :rtype: twilio.rest.autopilot.v1.assistant.query.QueryList
        """
        return self._proxy.queries

    @property
    def style_sheet(self):
        """
        Access the style_sheet

        :returns: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetList
        :rtype: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetList
        """
        return self._proxy.style_sheet

    @property
    def defaults(self):
        """
        Access the defaults

        :returns: twilio.rest.autopilot.v1.assistant.defaults.DefaultsList
        :rtype: twilio.rest.autopilot.v1.assistant.defaults.DefaultsList
        """
        return self._proxy.defaults

    @property
    def dialogues(self):
        """
        Access the dialogues

        :returns: twilio.rest.autopilot.v1.assistant.dialogue.DialogueList
        :rtype: twilio.rest.autopilot.v1.assistant.dialogue.DialogueList
        """
        return self._proxy.dialogues

    @property
    def webhooks(self):
        """
        Access the webhooks

        :returns: twilio.rest.autopilot.v1.assistant.webhook.WebhookList
        :rtype: twilio.rest.autopilot.v1.assistant.webhook.WebhookList
        """
        return self._proxy.webhooks

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Autopilot.V1.AssistantInstance {}>'.format(context)
|
|
"""Plot a GODagSmall."""
__copyright__ = "Copyright (C) 2016-2018, DV Klopfenstein, H Tang, All rights reserved."
__author__ = "DV Klopfenstein"
import sys
import os
import collections as cx
from collections import OrderedDict
from goatools.godag_obosm import OboToGoDagSmall
def plot_gos(fout_png, goids, obo_dag, *args, **kws):
    """Given GO ids and the obo_dag, create a plot of paths from GO ids.

    :param fout_png: output image filename; its extension selects the format
    :param goids: GO ids whose paths to the root are plotted
    :param obo_dag: full GO DAG loaded from an obo file
    """
    # Idiomatic lookup-with-default replaces the `if 'engine' in kws` ternary.
    engine = kws.get('engine', 'pydot')
    godagsmall = OboToGoDagSmall(goids=goids, obodag=obo_dag).godag
    godagplot = GODagSmallPlot(godagsmall, *args, **kws)
    godagplot.plt(fout_png, engine)
def plot_goid2goobj(fout_png, goid2goobj, *args, **kws):
    """Given a dict containing GO id and its goobj, create a plot of paths from GO ids.

    :param fout_png: output image filename; its extension selects the format
    :param goid2goobj: dict mapping GO id -> GO term object
    """
    # Idiomatic lookup-with-default replaces the `if 'engine' in kws` ternary.
    engine = kws.get('engine', 'pydot')
    godagsmall = OboToGoDagSmall(goid2goobj=goid2goobj).godag
    godagplot = GODagSmallPlot(godagsmall, *args, **kws)
    godagplot.plt(fout_png, engine)
def plot_results(fout_png, goea_results, *args, **kws):
    """Given a list of GOEA results, plot result GOs up to top.

    If the filename contains a literal "{NS}" placeholder, one plot is
    written per namespace (BP, MF, CC); otherwise a single plot is written.
    """
    # Single-page case: no namespace placeholder in the filename.
    if "{NS}" not in fout_png:
        plt_goea_results(fout_png, goea_results, *args, **kws)
        return
    # Group the results by namespace and plot each group separately.
    ns2results = cx.defaultdict(list)
    for rec in goea_results:
        ns2results[rec.NS].append(rec)
    for ns_name, ns_res in ns2results.items():
        plt_goea_results(fout_png.format(NS=ns_name), ns_res, *args, **kws)
def plt_goea_results(fout_png, goea_results, *args, **kws):
    """Plot a single page of GOEA results.

    :param fout_png: output image filename; its extension selects the format
    :param goea_results: GOEA result records to render
    """
    # Idiomatic lookup-with-default replaces the `if 'engine' in kws` ternary.
    engine = kws.get('engine', 'pydot')
    godagsmall = OboToGoDagSmall(goea_results=goea_results).godag
    godagplot = GODagSmallPlot(godagsmall, *args, goea_results=goea_results, **kws)
    godagplot.plt(fout_png, engine)
class GODagPltVars(object):
    """Holds plotting parameters."""

    # Edge color per GO relationship type.
    # Color names: http://www.graphviz.org/doc/info/colors.html
    rel2col = {
        'is_a': 'black',
        'part_of': 'blue',
        'regulates': 'gold',
        'positively_regulates': 'green',
        'negatively_regulates': 'red',
        'occurs_in': 'aquamarine4',
        'capable_of': 'dodgerblue',
        'capable_of_part_of': 'darkorange',
    }

    # Node fill color per significance threshold. Order matters:
    # _init_goid2color scans these in insertion order and keeps the FIRST
    # alpha satisfying pval <= alpha, so thresholds must stay ascending.
    alpha2col = OrderedDict([
        # GOEA GO terms that are significant
        (0.005, 'mistyrose'),
        (0.010, 'moccasin'),
        (0.050, 'lemonchiffon1'),
        # GOEA GO terms that are not significant
        (1.000, 'grey95'),
    ])

    # Fallback fill colors for level-01 terms and user-requested source GOs.
    key2col = {
        'level_01': 'lightcyan',
        'go_sources': 'palegreen',
    }

    # Header line inside each GO node box, e.g. "GO0036464 L04 D06".
    fmthdr = "{GO} L{level:>02} D{depth:>02}"
    # Study-count line inside each GO node box, e.g. "24 genes".
    fmtres = "{study_count} genes"

    # study items per line on GO Terms:
    items_p_line = 5
class GODagSmallPlot(object):
    """Plot a graph contained in an object of type GODagSmall.

    Recognized kws: goea_results or go2nt, pval_name, log, title, id2symbol,
    study_items, study_items_max (via study_items), alpha_str, GODagPltVars,
    items_p_line, dpi.
    """

    def __init__(self, godagsmall, *args, **kws):
        self.args = args
        self.log = kws.get('log', sys.stdout)
        self.title = kws.get('title')
        # GOATOOLs results as objects, keyed by GO id (None when absent)
        self.go2res = self._init_go2res(**kws)
        # Name of the p-value attribute on the result records (e.g. "p_fdr_bh")
        self.pval_name = self._init_pval_name(**kws)
        # Gene Symbol names
        self.id2symbol = kws.get('id2symbol', {})
        self.study_items = kws.get('study_items')
        self.study_items_max = self._init_study_items_max()
        self.alpha_str = kws.get('alpha_str')
        # Keep the short-circuiting ternary: GODagPltVars() must not be
        # constructed when the caller supplies their own parameters object.
        self.pltvars = kws['GODagPltVars'] if 'GODagPltVars' in kws else GODagPltVars()
        if 'items_p_line' in kws:
            self.pltvars.items_p_line = kws['items_p_line']
        self.dpi = kws.get('dpi', 150)
        self.godag = godagsmall
        self.goid2color = self._init_goid2color()
        # pydot module object, imported lazily on first plot
        self.pydot = None

    def _init_study_items_max(self):
        """User can limit the number of genes printed in a GO term."""
        if self.study_items is None:
            return None
        if self.study_items is True:
            # True means "print all study items" -> no cap
            return None
        if isinstance(self.study_items, int):
            return self.study_items
        return None

    @staticmethod
    def _init_go2res(**kws):
        """Initialize GOEA results: map GO id -> result record."""
        if 'goea_results' in kws:
            return {res.GO: res for res in kws['goea_results']}
        if 'go2nt' in kws:
            return kws['go2nt']

    @staticmethod
    def _init_pval_name(**kws):
        """Initialize pvalue attribute name (explicit, or from the method used)."""
        if 'pval_name' in kws:
            return kws['pval_name']
        if 'goea_results' in kws:
            goea = kws['goea_results']
            if goea:
                return "p_{M}".format(M=goea[0].method_flds[0].fieldname)

    def _init_goid2color(self):
        """Set colors of GO terms. Earlier rules win; later rules only fill gaps."""
        goid2color = {}
        # 1. colors based on p-value override colors based on source GO
        if self.go2res is not None:
            alpha2col = self.pltvars.alpha2col
            pval_name = self.pval_name
            for goid, res in self.go2res.items():
                pval = getattr(res, pval_name, None)
                if pval is not None:
                    # alpha2col is ordered ascending; the first threshold
                    # satisfying pval <= alpha determines the color.
                    for alpha, color in alpha2col.items():
                        if pval <= alpha and res.study_count != 0:
                            if goid not in goid2color:
                                goid2color[goid] = color
        # 2. GO source color
        color = self.pltvars.key2col['go_sources']
        for goid in self.godag.go_sources:
            if goid not in goid2color:
                goid2color[goid] = color
        # 3. Level-01 GO color
        color = self.pltvars.key2col['level_01']
        for goid, goobj in self.godag.go2obj.items():
            if goobj.level == 1:
                if goid not in goid2color:
                    goid2color[goid] = color
        return goid2color

    def plt(self, fout_img, engine="pydot"):
        """Plot using pydot, graphviz, or GML."""
        if engine == "pydot":
            self._plt_pydot(fout_img)
        elif engine == "pygraphviz":
            raise Exception("TO BE IMPLEMENTED SOON: ENGINE pygraphvis")
        else:
            raise Exception("UNKNOWN ENGINE({E})".format(E=engine))

    # ----------------------------------------------------------------------------------
    # pydot
    def _plt_pydot(self, fout_img):
        """Plot using the pydot graphics engine; format taken from the extension."""
        dag = self._get_pydot_graph()
        img_fmt = os.path.splitext(fout_img)[1][1:]
        dag.write(fout_img, format=img_fmt)
        self.log.write("  {GO_USR:>3} usr {GO_ALL:>3} GOs  WROTE: {F}\n".format(
            F=fout_img,
            GO_USR=len(self.godag.go_sources),
            GO_ALL=len(self.godag.go2obj)))

    def _get_pydot_graph(self):
        """Given a DAG, return a pydot digraph object."""
        rel = "is_a"
        pydot = self._get_pydot()
        # Initialize empty dag
        dag = pydot.Dot(label=self.title, graph_type='digraph', dpi="{}".format(self.dpi))
        # Initialize nodes
        go2node = self._get_go2pydotnode()
        # Add nodes to graph
        for node in go2node.values():
            dag.add_node(node)
        # Add edges to graph
        rel2col = self.pltvars.rel2col
        for src, tgt in self.godag.get_edges():
            dag.add_edge(pydot.Edge(
                go2node[tgt], go2node[src],
                shape="normal",
                color=rel2col[rel],
                dir="back"))  # invert arrow direction for obo dag convention
        return dag

    def _get_go2pydotnode(self):
        """Create pydot Nodes, one rounded box per GO term."""
        go2node = {}
        for goid, goobj in self.godag.go2obj.items():
            txt = self._get_node_text(goid, goobj)
            fillcolor = self.goid2color.get(goid, "white")
            node = self.pydot.Node(
                txt,
                shape="box",
                style="rounded, filled",
                fillcolor=fillcolor,
                color="mediumseagreen")
            go2node[goid] = node
        return go2node

    def _get_pydot(self):
        """Return pydot package. Load pydot, if necessary."""
        if self.pydot:
            return self.pydot
        self.pydot = __import__("pydot")
        return self.pydot

    # ----------------------------------------------------------------------------------
    # Methods for text printed inside GO terms
    def _get_node_text(self, goid, goobj):
        """Return a string to be printed in a GO term box."""
        txt = []
        # Header line: "GO:0036464 L04 D06"
        txt.append(self.pltvars.fmthdr.format(
            GO=goobj.id.replace("GO:", "GO"),
            level=goobj.level,
            depth=goobj.depth))
        # GO name line: "cytoplamic ribonucleoprotein"
        name = goobj.name.replace(",", "\n")
        txt.append(name)
        # study info line: "24 genes"
        study_txt = self._get_study_txt(goid)
        if study_txt is not None:
            txt.append(study_txt)
        # return text string
        return "\n".join(txt)

    def _get_study_txt(self, goid):
        """Get GO text from GOEA study; None when this GO has no result."""
        if self.go2res is not None:
            res = self.go2res.get(goid, None)
            if res is not None:
                if self.study_items is not None:
                    return self._get_item_str(res)
                else:
                    return self.pltvars.fmtres.format(
                        study_count=res.study_count)

    def _get_item_str(self, res):
        """Return genes in any of these formats:
              1. 19264, 17319, 12520, 12043, 74131, 22163, 12575
              2. Ptprc, Mif, Cd81, Bcl2, Sash3, Tnfrsf4, Cdkn1a
              3. 7: Ptprc, Mif, Cd81, Bcl2, Sash3...
        """
        npl = self.pltvars.items_p_line  # Number of items Per Line
        prt_items = sorted([self.__get_genestr(itemid) for itemid in res.study_items])
        prt_multiline = [prt_items[i:i + npl] for i in range(0, len(prt_items), npl)]
        num_items = len(prt_items)
        if self.study_items_max is None:
            genestr = "\n".join([", ".join(str(e) for e in sublist) for sublist in prt_multiline])
            return "{N}) {GENES}".format(N=num_items, GENES=genestr)
        if num_items <= self.study_items_max:
            # NOTE: removed a dead duplicate computation of this join that
            # was assigned to an unused local in the original.
            return "\n".join([", ".join(str(e) for e in sublist) for sublist in prt_multiline])
        # Truncate to the cap and mark the omission.
        short_list = prt_items[:self.study_items_max]
        short_mult = [short_list[i:i + npl] for i in range(0, len(short_list), npl)]
        short_str = "\n".join([", ".join(str(e) for e in sublist) for sublist in short_mult])
        return "".join(["{N} genes; ".format(N=num_items), short_str, "..."])

    def __get_genestr(self, itemid):
        """Given a geneid, return the string geneid or a gene symbol."""
        if self.id2symbol is not None:
            symbol = self.id2symbol.get(itemid, None)
            if symbol is not None:
                return symbol
        if isinstance(itemid, int):
            return str(itemid)
        return itemid
# Copyright (C) 2016-2018, DV Klopfenstein, H Tang, All rights reserved.
|
|
# Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
# Copyright 2014-2015 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_service import fixture as service_fixture
from oslo_utils import encodeutils
from nova import context
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.fixtures import libvirt as fakelibvirt
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
class GuestTestCase(test.NoDBTestCase):
    """Tests for nova.virt.libvirt.guest.Guest against a mocked virDomain."""

    def setUp(self):
        super(GuestTestCase, self).setUp()
        self.useFixture(nova_fixtures.LibvirtFixture())
        self.host = host.Host("qemu:///system")
        self.context = context.get_admin_context()
        # The Guest under test wraps a Mock (spec'd to the fake virDomain)
        # so each test can assert exactly which libvirt calls were made.
        self.domain = mock.Mock(spec=fakelibvirt.virDomain)
        self.guest = libvirt_guest.Guest(self.domain)
        # Make RetryDecorator not actually sleep on retries
        self.useFixture(service_fixture.SleepFixture())
def test_repr(self):
    # repr() combines the domain's id, name and UUID.
    self.domain.ID.return_value = 99
    self.domain.UUIDString.return_value = "UUID"
    self.domain.name.return_value = "foo"
    self.assertEqual("<Guest 99 foo UUID>", repr(self.guest))

@mock.patch.object(fakelibvirt.Connection, 'defineXML')
def test_create(self, mock_define):
    # Guest.create passes the XML straight through to defineXML.
    libvirt_guest.Guest.create("xml", self.host)
    mock_define.assert_called_once_with("xml")

@mock.patch.object(libvirt_guest.LOG, 'error')
@mock.patch.object(fakelibvirt.Connection, 'defineXML')
def test_create_exception(self, mock_define, mock_log):
    fake_xml = '<test>this is a test</test>'
    mock_define.side_effect = test.TestingException
    self.assertRaises(test.TestingException,
                      libvirt_guest.Guest.create,
                      fake_xml, self.host)
    # ensure the XML is logged
    self.assertIn(fake_xml, str(mock_log.call_args[0]))
def test_launch(self):
    # Default launch uses no start flags.
    self.guest.launch()
    self.domain.createWithFlags.assert_called_once_with(0)

def test_launch_and_pause(self):
    # pause=True maps to VIR_DOMAIN_START_PAUSED.
    self.guest.launch(pause=True)
    self.domain.createWithFlags.assert_called_once_with(
        fakelibvirt.VIR_DOMAIN_START_PAUSED)

@mock.patch.object(libvirt_guest.LOG, 'exception')
@mock.patch.object(encodeutils, 'safe_decode')
def test_launch_exception(self, mock_safe_decode, mock_log):
    fake_xml = '<test>this is a test</test>'
    self.domain.createWithFlags.side_effect = test.TestingException
    mock_safe_decode.return_value = fake_xml
    self.assertRaises(test.TestingException, self.guest.launch)
    self.assertEqual(1, mock_safe_decode.called)
    # ensure the XML is logged
    self.assertIn(fake_xml, str(mock_log.call_args[0]))
def test_shutdown(self):
    self.domain.shutdown = mock.MagicMock()
    self.guest.shutdown()
    self.domain.shutdown.assert_called_once_with()

def test_get_interfaces(self):
    # Interface target dev names are parsed out of the domain XML.
    self.domain.XMLDesc.return_value = """<domain>
  <devices>
    <interface type="network">
      <target dev="vnet0"/>
    </interface>
    <interface type="network">
      <target dev="vnet1"/>
    </interface>
  </devices>
</domain>"""
    self.assertEqual(["vnet0", "vnet1"], self.guest.get_interfaces())

def test_get_interfaces_exception(self):
    # Unparseable XML yields an empty interface list rather than an error.
    self.domain.XMLDesc.return_value = "<bad xml>"
    self.assertEqual([], self.guest.get_interfaces())
def test_poweroff(self):
    self.guest.poweroff()
    self.domain.destroy.assert_called_once_with()

def test_resume(self):
    self.guest.resume()
    self.domain.resume.assert_called_once_with()

@mock.patch('time.time', return_value=1234567890.125)
def test_time_sync_no_errors(self, time_mock):
    # setTime failures are swallowed; the fractional second becomes nseconds.
    self.domain.setTime.side_effect = fakelibvirt.libvirtError('error')
    self.guest.sync_guest_time()
    self.domain.setTime.assert_called_once_with(time={
        'nseconds': 125000000,
        'seconds': 1234567890})
def test_get_vcpus_info(self):
    # vcpus() returns ([(id, state, time, cpu)], [cpumap]); verify mapping.
    self.domain.vcpus.return_value = ([(0, 1, int(10290000000), 2)],
                                      [(True, True)])
    vcpus = list(self.guest.get_vcpus_info())
    self.assertEqual(0, vcpus[0].id)
    self.assertEqual(2, vcpus[0].cpu)
    self.assertEqual(1, vcpus[0].state)
    self.assertEqual(int(10290000000), vcpus[0].time)

def test_delete_configuration(self):
    self.guest.delete_configuration()
    self.domain.undefineFlags.assert_called_once_with(
        fakelibvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)

def test_delete_configuration_with_nvram(self):
    # UEFI guests additionally request NVRAM removal.
    self.guest.delete_configuration(support_uefi=True)
    self.domain.undefineFlags.assert_called_once_with(
        fakelibvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE |
        fakelibvirt.VIR_DOMAIN_UNDEFINE_NVRAM)

def test_delete_configuration_exception(self):
    # When undefineFlags fails, the code falls back to plain undefine().
    self.domain.undefineFlags.side_effect = fakelibvirt.libvirtError(
        'oops')
    self.domain.ID.return_value = 1
    self.guest.delete_configuration()
    self.domain.undefine.assert_called_once_with()
# The attach/detach tests verify the persistent/live keyword arguments map
# to VIR_DOMAIN_AFFECT_CONFIG / VIR_DOMAIN_AFFECT_LIVE flag combinations.
def test_attach_device(self):
    conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
    conf.to_xml.return_value = "</xml>"
    self.guest.attach_device(conf)
    self.domain.attachDeviceFlags.assert_called_once_with(
        "</xml>", flags=0)

def test_attach_device_persistent(self):
    conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
    conf.to_xml.return_value = "</xml>"
    self.guest.attach_device(conf, persistent=True)
    self.domain.attachDeviceFlags.assert_called_once_with(
        "</xml>", flags=fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG)

def test_attach_device_live(self):
    conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
    conf.to_xml.return_value = "</xml>"
    self.guest.attach_device(conf, live=True)
    self.domain.attachDeviceFlags.assert_called_once_with(
        "</xml>", flags=fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)

def test_attach_device_persistent_live(self):
    conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
    conf.to_xml.return_value = "</xml>"
    self.guest.attach_device(conf, persistent=True, live=True)
    self.domain.attachDeviceFlags.assert_called_once_with(
        "</xml>", flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                         fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))

def test_detach_device(self):
    conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
    conf.to_xml.return_value = "</xml>"
    self.guest.detach_device(conf)
    self.domain.detachDeviceFlags.assert_called_once_with(
        "</xml>", flags=0)

def test_detach_device_persistent(self):
    conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
    conf.to_xml.return_value = "</xml>"
    self.guest.detach_device(conf, persistent=True)
    self.domain.detachDeviceFlags.assert_called_once_with(
        "</xml>", flags=fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG)

def test_detach_device_live(self):
    conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
    conf.to_xml.return_value = "</xml>"
    self.guest.detach_device(conf, live=True)
    self.domain.detachDeviceFlags.assert_called_once_with(
        "</xml>", flags=fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)

def test_detach_device_persistent_live(self):
    conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
    conf.to_xml.return_value = "</xml>"
    self.guest.detach_device(conf, persistent=True, live=True)
    self.domain.detachDeviceFlags.assert_called_once_with(
        "</xml>", flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                         fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
# The get_xml_desc tests verify dump_* keyword arguments map to the
# corresponding VIR_DOMAIN_XML_* flags.
def test_get_xml_desc(self):
    self.guest.get_xml_desc()
    self.domain.XMLDesc.assert_called_once_with(flags=0)

def test_get_xml_desc_dump_inactive(self):
    self.guest.get_xml_desc(dump_inactive=True)
    self.domain.XMLDesc.assert_called_once_with(
        flags=fakelibvirt.VIR_DOMAIN_XML_INACTIVE)

def test_get_xml_desc_dump_sensitive(self):
    self.guest.get_xml_desc(dump_sensitive=True)
    self.domain.XMLDesc.assert_called_once_with(
        flags=fakelibvirt.VIR_DOMAIN_XML_SECURE)

def test_get_xml_desc_dump_inactive_dump_sensitive(self):
    self.guest.get_xml_desc(dump_inactive=True, dump_sensitive=True)
    self.domain.XMLDesc.assert_called_once_with(
        flags=(fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
               fakelibvirt.VIR_DOMAIN_XML_SECURE))

def test_get_xml_desc_dump_migratable(self):
    self.guest.get_xml_desc(dump_migratable=True)
    self.domain.XMLDesc.assert_called_once_with(
        flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE)

def test_has_persistent_configuration(self):
    self.assertTrue(
        self.guest.has_persistent_configuration())
    self.domain.isPersistent.assert_called_once_with()

def test_save_memory_state(self):
    self.guest.save_memory_state()
    self.domain.managedSave.assert_called_once_with(0)
def test_get_block_device(self):
    # get_block_device wraps the disk name and guest in a BlockDevice helper.
    disk = 'vda'
    gblock = self.guest.get_block_device(disk)
    self.assertEqual(disk, gblock._disk)
    self.assertEqual(self.guest, gblock._guest)

def test_set_user_password(self):
    self.guest.set_user_password("foo", "123")
    self.domain.setUserPassword.assert_called_once_with("foo", "123", 0)

def test_get_config(self):
    # get_config parses the domain XML into a LibvirtConfigGuest object.
    xml = "<domain type='kvm'><name>fake</name></domain>"
    self.domain.XMLDesc.return_value = xml
    result = self.guest.get_config()
    self.assertIsInstance(result, vconfig.LibvirtConfigGuest)
    self.assertEqual('kvm', result.virt_type)
    self.assertEqual('fake', result.name)
def test_get_devices(self):
    # Exercises get_all_devices / get_all_disks / get_interface_by_cfg
    # against a domain XML holding 3 disks, 2 hostdevs and 1 interface.
    xml = """
    <domain type='qemu'>
      <name>QEMUGuest1</name>
      <uuid>c7a5fdbd-edaf-9455-926a-d65c16db1809</uuid>
      <memory unit='KiB'>219136</memory>
      <currentMemory unit='KiB'>219136</currentMemory>
      <vcpu placement='static'>1</vcpu>
      <os>
        <type arch='i686' machine='pc'>hvm</type>
        <boot dev='hd'/>
      </os>
      <clock offset='utc'/>
      <on_poweroff>destroy</on_poweroff>
      <on_reboot>restart</on_reboot>
      <on_crash>destroy</on_crash>
      <devices>
        <emulator>/usr/bin/qemu</emulator>
        <disk type='block' device='disk'>
          <driver name='qemu' type='raw'/>
          <source dev='/dev/HostVG/QEMUGuest2'/>
          <target dev='hda' bus='ide'/>
          <address type='drive' controller='0' bus='0' target='0' unit='0'/>
        </disk>
        <disk type='network' device='disk'>
          <driver name='qemu' type='raw'/>
          <auth username='myname'>
            <secret type='iscsi' usage='mycluster_myname'/>
          </auth>
          <source protocol='iscsi' name='iqn.1992-01.com.example'>
            <host name='example.org' port='6000'/>
          </source>
          <target dev='vda' bus='virtio'/>
        </disk>
        <disk type='network' device='disk'>
          <driver name='qemu' type='raw'/>
          <source protocol='iscsi' name='iqn.1992-01.com.example/1'>
            <host name='example.org' port='6000'/>
          </source>
          <target dev='vdb' bus='virtio'/>
        </disk>
        <hostdev mode='subsystem' type='pci' managed='yes'>
          <source>
            <address domain='0x0000' bus='0x06' slot='0x12' function='0x5'/>
          </source>
        </hostdev>
        <hostdev mode='subsystem' type='pci' managed='yes'>
          <source>
            <address domain='0x0000' bus='0x06' slot='0x12' function='0x6'/>
          </source>
        </hostdev>
        <interface type="bridge">
          <mac address="fa:16:3e:f9:af:ae"/>
          <model type="virtio"/>
          <driver name="qemu"/>
          <source bridge="qbr84008d03-11"/>
          <target dev="tap84008d03-11"/>
        </interface>
        <controller type='usb' index='0'/>
        <controller type='pci' index='0' model='pci-root'/>
        <memballoon model='none'/>
      </devices>
    </domain>
    """

    self.domain.XMLDesc.return_value = xml

    devs = self.guest.get_all_devices()
    # Only currently parse <disk>, <hostdev> and <interface> elements
    # hence we're not counting the controller/memballoon
    self.assertEqual(6, len(devs))
    self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(devs[1], vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(devs[2], vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(devs[3], vconfig.LibvirtConfigGuestHostdev)
    self.assertIsInstance(devs[4], vconfig.LibvirtConfigGuestHostdev)
    self.assertIsInstance(devs[5], vconfig.LibvirtConfigGuestInterface)

    devs = self.guest.get_all_devices(vconfig.LibvirtConfigGuestDisk)
    self.assertEqual(3, len(devs))
    self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(devs[1], vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(devs[2], vconfig.LibvirtConfigGuestDisk)

    devs = self.guest.get_all_disks()
    self.assertEqual(3, len(devs))
    self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(devs[1], vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(devs[2], vconfig.LibvirtConfigGuestDisk)

    devs = self.guest.get_all_devices(vconfig.LibvirtConfigGuestHostdev)
    self.assertEqual(2, len(devs))
    self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestHostdev)
    self.assertIsInstance(devs[1], vconfig.LibvirtConfigGuestHostdev)

    devs = self.guest.get_all_devices(vconfig.LibvirtConfigGuestInterface)
    self.assertEqual(1, len(devs))
    self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestInterface)

    cfg = vconfig.LibvirtConfigGuestInterface()
    cfg.parse_str("""
        <interface type="bridge">
          <mac address="fa:16:3e:f9:af:ae"/>
          <model type="virtio"/>
          <driver name="qemu"/>
          <source bridge="qbr84008d03-11"/>
          <target dev="tap84008d03-11"/>
        </interface>""")
    self.assertIsNotNone(
        self.guest.get_interface_by_cfg(cfg))
    self.assertIsNone(self.guest.get_interface_by_cfg(None))
    self.domain.XMLDesc.assert_has_calls([mock.call(0)] * 6)

    # now check if the persistent config can be queried too
    self.domain.XMLDesc.reset_mock()
    devs = self.guest.get_all_devices(
        devtype=None, from_persistent_config=True)
    self.domain.XMLDesc.assert_called_once_with(
        fakelibvirt.VIR_DOMAIN_XML_INACTIVE)
def test_get_interface_by_cfg_persistent_domain(self):
    # from_persistent_config=True must query the INACTIVE (persistent) XML.
    self.domain.XMLDesc.return_value = """<domain>
  <devices>
    <interface type="bridge">
      <mac address="fa:16:3e:f9:af:ae"/>
      <model type="virtio"/>
      <driver name="qemu"/>
      <source bridge="qbr84008d03-11"/>
      <target dev="tap84008d03-11"/>
    </interface>
  </devices>
</domain>"""
    cfg = vconfig.LibvirtConfigGuestInterface()
    cfg.parse_str("""
        <interface type="bridge">
          <mac address="fa:16:3e:f9:af:ae"/>
          <model type="virtio"/>
          <driver name="qemu"/>
          <source bridge="qbr84008d03-11"/>
          <target dev="tap84008d03-11"/>
        </interface>""")
    self.assertIsNotNone(
        self.guest.get_interface_by_cfg(
            cfg, from_persistent_config=True))
    self.assertIsNone(
        self.guest.get_interface_by_cfg(
            vconfig.LibvirtConfigGuestInterface(),
            from_persistent_config=True))
    self.domain.XMLDesc.assert_has_calls(
        [
            mock.call(fakelibvirt.VIR_DOMAIN_XML_INACTIVE),
            mock.call(fakelibvirt.VIR_DOMAIN_XML_INACTIVE),
        ]
    )
    def test_get_interface_by_cfg_vhostuser(self):
        """A vhostuser interface is matched by an equivalent config."""
        self.domain.XMLDesc.return_value = """<domain>
<devices>
<interface type="vhostuser">
<mac address='fa:16:3e:55:3e:e4'/>
<source type='unix' path='/var/run/openvswitch/vhued80c655-4e'
mode='server'/>
<target dev='vhued80c655-4e'/>
<model type='virtio'/>
<alias name='net0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03'
function='0x0'/>
</interface>
</devices>
</domain>"""
        cfg = vconfig.LibvirtConfigGuestInterface()
        cfg.parse_str("""<interface type="vhostuser">
<mac address='fa:16:3e:55:3e:e4'/>
<model type="virtio"/>
<source type='unix' path='/var/run/openvswitch/vhued80c655-4e'
mode='server'/>
<alias name='net0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>""")
        self.assertIsNotNone(
            self.guest.get_interface_by_cfg(cfg))
        # No config given -> no match.
        self.assertIsNone(self.guest.get_interface_by_cfg(None))
    def test_get_interface_by_cfg_hostdev_pci(self):
        """A PCI hostdev is matched regardless of '0x' address prefixes.

        The same device must be found whether the config spells the PCI
        address with or without the hexadecimal prefix.
        """
        self.domain.XMLDesc.return_value = """<domain>
<devices>
<hostdev mode='subsystem' type='pci' managed='yes'>
<driver name='vfio'/>
<source>
<address domain='0x0000' bus='0x81' slot='0x00'
function='0x1'/>
</source>
<alias name='hostdev0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04'
function='0x0'/>
</hostdev>
</devices>
</domain>"""
        cfg = vconfig.LibvirtConfigGuestHostdevPCI()
        cfg.parse_str("""
<hostdev mode='subsystem' type='pci' managed='yes'>
<driver name='vfio'/>
<source>
<address domain='0x0000' bus='0x81' slot='0x00' function='0x1'/>
</source>
</hostdev>""")
        # Prefixed ('0x0000') address form matches...
        self.assertIsNotNone(
            self.guest.get_interface_by_cfg(cfg))
        cfg.parse_str("""
<hostdev mode='subsystem' type='pci' managed='yes'>
<driver name='vfio'/>
<source>
<address domain='0000' bus='81' slot='00' function='1'/>
</source>
</hostdev>""")
        # ...and the bare hexadecimal form matches too.
        self.assertIsNotNone(
            self.guest.get_interface_by_cfg(cfg))
        self.assertIsNone(self.guest.get_interface_by_cfg(None))
    def test_get_info(self):
        """get_info() exposes the libvirt info tuple and the domain ID."""
        self.domain.info.return_value = (1, 2, 3, 4, 5)
        self.domain.ID.return_value = 6
        info = self.guest.get_info(self.host)
        self.domain.info.assert_called_once_with()
        # First element of the info tuple is the power state.
        self.assertEqual(1, info.state)
        self.assertEqual(6, info.internal_id)
    def test_get_power_state(self):
        """get_power_state() returns the first element of domain.info()."""
        self.domain.info.return_value = (1, 2, 3, 4, 5)
        power = self.guest.get_power_state(self.host)
        self.assertEqual(1, power)
    def test_is_active_when_domain_is_active(self):
        """is_active() mirrors virDomain.isActive() == True."""
        with mock.patch.object(self.domain, "isActive", return_value=True):
            self.assertTrue(self.guest.is_active())
    def test_is_active_when_domain_not_active(self):
        """is_active() mirrors virDomain.isActive() == False."""
        with mock.patch.object(self.domain, "isActive", return_value=False):
            self.assertFalse(self.guest.is_active())
    def test_freeze_filesystems(self):
        """freeze_filesystems() delegates to virDomain.fsFreeze()."""
        self.guest.freeze_filesystems()
        self.domain.fsFreeze.assert_called_once_with()
    def test_thaw_filesystems(self):
        """thaw_filesystems() delegates to virDomain.fsThaw()."""
        self.guest.thaw_filesystems()
        self.domain.fsThaw.assert_called_once_with()
    def _conf_snapshot(self):
        # Helper: a snapshot-disk config mock whose XML renders as '<disk/>'.
        conf = mock.Mock(spec=vconfig.LibvirtConfigGuestSnapshotDisk)
        conf.to_xml.return_value = '<disk/>'
        return conf
def test_snapshot(self):
conf = self._conf_snapshot()
self.guest.snapshot(conf)
self.domain.snapshotCreateXML('<disk/>', flags=0)
conf.to_xml.assert_called_once_with()
def test_snapshot_no_metadata(self):
conf = self._conf_snapshot()
self.guest.snapshot(conf, no_metadata=True)
self.domain.snapshotCreateXML(
'<disk/>',
flags=fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA)
conf.to_xml.assert_called_once_with()
def test_snapshot_disk_only(self):
conf = self._conf_snapshot()
self.guest.snapshot(conf, disk_only=True)
self.domain.snapshotCreateXML(
'<disk/>', flags=fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY)
conf.to_xml.assert_called_once_with()
def test_snapshot_reuse_ext(self):
conf = self._conf_snapshot()
self.guest.snapshot(conf, reuse_ext=True)
self.domain.snapshotCreateXML(
'<disk/>', flags=fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
conf.to_xml.assert_called_once_with()
def test_snapshot_quiesce(self):
conf = self._conf_snapshot()
self.guest.snapshot(conf, quiesce=True)
self.domain.snapshotCreateXML(
'<disk/>', flags=fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE)
conf.to_xml.assert_called_once_with()
def test_snapshot_all(self):
conf = self._conf_snapshot()
self.guest.snapshot(conf, no_metadata=True,
disk_only=True, reuse_ext=True,
quiesce=True)
self.domain.snapshotCreateXML(
'<disk/>', flags=(
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT |
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE))
conf.to_xml.assert_called_once_with()
    def test_pause(self):
        """pause() delegates to virDomain.suspend()."""
        self.guest.pause()
        self.domain.suspend.assert_called_once_with()
    def test_migrate_v3(self):
        """migrate() forwards its options as migrateToURI3 params.

        Note that destination_xml is also duplicated into persistent_xml.
        """
        self.guest.migrate('an-uri', flags=1, migrate_uri='dest-uri',
                           migrate_disks='disk1',
                           destination_xml='</xml>',
                           bandwidth=2)
        self.domain.migrateToURI3.assert_called_once_with(
            'an-uri', flags=1, params={'migrate_uri': 'dest-uri',
                                       'migrate_disks': 'disk1',
                                       'destination_xml': '</xml>',
                                       'persistent_xml': '</xml>',
                                       'bandwidth': 2})
    def test_abort_job(self):
        """abort_job() delegates to virDomain.abortJob()."""
        self.guest.abort_job()
        self.domain.abortJob.assert_called_once_with()
    def test_migrate_configure_max_downtime(self):
        """The downtime value is passed straight to migrateSetMaxDowntime."""
        self.guest.migrate_configure_max_downtime(1000)
        self.domain.migrateSetMaxDowntime.assert_called_once_with(1000)
    def test_set_metadata(self):
        """Default set_metadata() targets neither live nor persistent config."""
        meta = mock.Mock(spec=vconfig.LibvirtConfigGuestMetaNovaInstance)
        meta.to_xml.return_value = "</xml>"
        self.guest.set_metadata(meta)
        self.domain.setMetadata.assert_called_once_with(
            fakelibvirt.VIR_DOMAIN_METADATA_ELEMENT, "</xml>", "instance",
            vconfig.NOVA_NS, flags=0)
    def test_set_metadata_persistent(self):
        """persistent=True maps to VIR_DOMAIN_AFFECT_CONFIG."""
        meta = mock.Mock(spec=vconfig.LibvirtConfigGuestMetaNovaInstance)
        meta.to_xml.return_value = "</xml>"
        self.guest.set_metadata(meta, persistent=True)
        self.domain.setMetadata.assert_called_once_with(
            fakelibvirt.VIR_DOMAIN_METADATA_ELEMENT, "</xml>", "instance",
            vconfig.NOVA_NS, flags=fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG)
    def test_set_metadata_device_live(self):
        """live=True maps to VIR_DOMAIN_AFFECT_LIVE."""
        meta = mock.Mock(spec=vconfig.LibvirtConfigGuestMetaNovaInstance)
        meta.to_xml.return_value = "</xml>"
        self.guest.set_metadata(meta, live=True)
        self.domain.setMetadata.assert_called_once_with(
            fakelibvirt.VIR_DOMAIN_METADATA_ELEMENT, "</xml>", "instance",
            vconfig.NOVA_NS, flags=fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)
    def test_set_metadata_persistent_live(self):
        """persistent=True and live=True OR their affect flags together."""
        meta = mock.Mock(spec=vconfig.LibvirtConfigGuestMetaNovaInstance)
        meta.to_xml.return_value = "</xml>"
        self.guest.set_metadata(meta, persistent=True, live=True)
        self.domain.setMetadata.assert_called_once_with(
            fakelibvirt.VIR_DOMAIN_METADATA_ELEMENT, "</xml>", "instance",
            vconfig.NOVA_NS, flags=fakelibvirt.VIR_DOMAIN_AFFECT_LIVE |
            fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG)
    def test_get_disk_xml(self):
        """get_disk() finds disks by target dev name in the live XML."""
        dom_xml = """
<domain type="kvm">
<devices>
<disk type="file">
<source file="disk1_file"/>
<target dev="vda" bus="virtio"/>
<serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>
<disk type="block">
<source dev="/path/to/dev/1"/>
<target dev="vdb" bus="virtio" serial="1234"/>
</disk>
</devices>
</domain>
"""
        diska_xml = """<disk type="file" device="disk">
<source file="disk1_file"/>
<target bus="virtio" dev="vda"/>
<serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>"""
        diskb_xml = """<disk type="block" device="disk">
<source dev="/path/to/dev/1"/>
<target bus="virtio" dev="vdb"/>
</disk>"""
        dom = mock.MagicMock()
        dom.XMLDesc.return_value = dom_xml
        guest = libvirt_guest.Guest(dom)
        # NOTE(gcb): etree.tostring(node) returns an extra line with
        # some white spaces, need to strip it.
        actual_diska_xml = guest.get_disk('vda').to_xml()
        self.assertXmlEqual(diska_xml, actual_diska_xml)
        actual_diskb_xml = guest.get_disk('vdb').to_xml()
        self.assertXmlEqual(diskb_xml, actual_diskb_xml)
        # Unknown device name yields None.
        self.assertIsNone(guest.get_disk('vdc'))
        # One live XMLDesc(0) call per lookup.
        dom.XMLDesc.assert_has_calls([mock.call(0)] * 3)
    def test_get_disk_xml_from_persistent_config(self):
        """get_disk(from_persistent_config=True) queries the inactive XML."""
        dom_xml = """
<domain type="kvm">
<devices>
<disk type="file">
<source file="disk1_file"/>
<target dev="vda" bus="virtio"/>
<serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>
<disk type="block">
<source dev="/path/to/dev/1"/>
<target dev="vdb" bus="virtio" serial="1234"/>
</disk>
</devices>
</domain>
"""
        diska_xml = """<disk type="file" device="disk">
<source file="disk1_file"/>
<target bus="virtio" dev="vda"/>
<serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>"""
        dom = mock.MagicMock()
        dom.XMLDesc.return_value = dom_xml
        guest = libvirt_guest.Guest(dom)
        actual_diska_xml = guest.get_disk(
            'vda', from_persistent_config=True).to_xml()
        self.assertXmlEqual(diska_xml, actual_diska_xml)
        # The persistent lookup must request the INACTIVE definition.
        dom.XMLDesc.assert_called_once_with(
            fakelibvirt.VIR_DOMAIN_XML_INACTIVE)
class GuestBlockTestCase(test.NoDBTestCase):
    """Tests for the per-disk block-device wrapper returned by
    Guest.get_block_device(): job abort/info, resize, rebase, commit
    and stats delegation to the underlying virDomain mock.
    """

    def setUp(self):
        super(GuestBlockTestCase, self).setUp()

        self.useFixture(nova_fixtures.LibvirtFixture())
        self.host = host.Host("qemu:///system")
        self.context = context.get_admin_context()

        self.domain = mock.Mock(spec=fakelibvirt.virDomain)
        self.guest = libvirt_guest.Guest(self.domain)
        # All tests operate on the 'vda' block device.
        self.gblock = self.guest.get_block_device('vda')

    def test_abort_job(self):
        """abort_job() with defaults passes flags=0."""
        self.gblock.abort_job()
        self.domain.blockJobAbort.assert_called_once_with('vda', flags=0)

    def test_abort_job_async(self):
        """async_=True maps to VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC."""
        self.gblock.abort_job(async_=True)
        self.domain.blockJobAbort.assert_called_once_with(
            'vda', flags=fakelibvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC)

    def test_abort_job_pivot(self):
        """pivot=True maps to VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT."""
        self.gblock.abort_job(pivot=True)
        self.domain.blockJobAbort.assert_called_once_with(
            'vda', flags=fakelibvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT)

    def test_get_job_info(self):
        """get_job_info() maps the blockJobInfo dict onto the info object."""
        self.domain.blockJobInfo.return_value = {
            "type": 1,
            "bandwidth": 18,
            "cur": 66,
            "end": 100}

        info = self.gblock.get_job_info()
        self.assertEqual(1, info.job)
        self.assertEqual(18, info.bandwidth)
        self.assertEqual(66, info.cur)
        self.assertEqual(100, info.end)
        self.domain.blockJobInfo.assert_called_once_with('vda', flags=0)

    def test_resize(self):
        # NOTE(review): flags=1 presumably means resize-in-bytes — confirm
        # against the Guest.resize implementation.
        self.gblock.resize(10)
        self.domain.blockResize.assert_called_once_with('vda', 10, flags=1)

    def test_rebase(self):
        """rebase() with defaults passes bandwidth 0 and flags=0."""
        self.gblock.rebase("foo")
        self.domain.blockRebase.assert_called_once_with(
            'vda', "foo", 0, flags=0)

    def test_rebase_shallow(self):
        self.gblock.rebase("foo", shallow=True)
        self.domain.blockRebase.assert_called_once_with(
            'vda', "foo", 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)

    def test_rebase_reuse_ext(self):
        self.gblock.rebase("foo", reuse_ext=True)
        self.domain.blockRebase.assert_called_once_with(
            'vda', "foo", 0,
            flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)

    def test_rebase_copy(self):
        self.gblock.rebase("foo", copy=True)
        self.domain.blockRebase.assert_called_once_with(
            'vda', "foo", 0,
            flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY)

    def test_rebase_relative(self):
        self.gblock.rebase("foo", relative=True)
        self.domain.blockRebase.assert_called_once_with(
            'vda', "foo", 0,
            flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE)

    def test_rebase_copy_dev(self):
        self.gblock.rebase("foo", copy_dev=True)
        self.domain.blockRebase.assert_called_once_with(
            'vda', "foo", 0,
            flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY_DEV)

    def test_commit(self):
        self.gblock.commit("foo", "top")
        self.domain.blockCommit.assert_called_once_with(
            'vda', "foo", "top", 0, flags=0)

    def test_commit_relative(self):
        self.gblock.commit("foo", "top", relative=True)
        self.domain.blockCommit.assert_called_once_with(
            'vda', "foo", "top", 0,
            flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)

    def test_is_job_complete_cur_end_zeros(self):
        """A job reporting cur == end == 0 is not yet complete."""
        self.domain.blockJobInfo.return_value = {
            "type": 4,
            "bandwidth": 18,
            "cur": 0,
            "end": 0}
        is_complete = self.gblock.is_job_complete()
        self.assertFalse(is_complete)

    def test_is_job_complete_current_lower_than_end(self):
        self.domain.blockJobInfo.return_value = {
            "type": 4,
            "bandwidth": 18,
            "cur": 95,
            "end": 100}
        is_complete = self.gblock.is_job_complete()
        self.assertFalse(is_complete)

    def test_is_job_complete_not_ready(self):
        """cur == end is not enough while the disk mirror is not ready."""
        gblock = self.guest.get_block_device('vda')
        disk = vconfig.LibvirtConfigGuestDisk()
        disk.mirror = vconfig.LibvirtConfigGuestDiskMirror()
        with mock.patch.object(self.guest, 'get_disk', return_value=disk):
            self.domain.blockJobInfo.return_value = {
                "type": 4,
                "bandwidth": 18,
                "cur": 100,
                "end": 100}
            is_complete = gblock.is_job_complete()
            self.assertFalse(is_complete)

    def test_is_job_complete_ready(self):
        """cur == end plus mirror ready == 'yes' means complete."""
        gblock = self.guest.get_block_device('vda')
        disk = vconfig.LibvirtConfigGuestDisk()
        disk.mirror = vconfig.LibvirtConfigGuestDiskMirror()
        disk.mirror.ready = 'yes'
        with mock.patch.object(self.guest, 'get_disk', return_value=disk):
            self.domain.blockJobInfo.return_value = {
                "type": 4,
                "bandwidth": 18,
                "cur": 100,
                "end": 100}
            is_complete = gblock.is_job_complete()
            self.assertTrue(is_complete)

    def test_is_job_complete_no_job(self):
        """An empty blockJobInfo dict means there is no job -> complete."""
        self.domain.blockJobInfo.return_value = {}
        is_complete = self.gblock.is_job_complete()
        self.assertTrue(is_complete)

    def test_is_job_complete_exception(self):
        """libvirt errors from blockJobInfo propagate to the caller."""
        self.domain.blockJobInfo.side_effect = fakelibvirt.libvirtError('fake')
        self.assertRaises(fakelibvirt.libvirtError,
                          self.gblock.is_job_complete)

    def test_blockStats(self):
        self.gblock.blockStats()
        self.domain.blockStats.assert_called_once_with('vda')
class JobInfoTestCase(test.NoDBTestCase):
    """Tests for Guest.get_job_info().

    Covers the preferred jobStats() path, the fallback to the older
    jobInfo() API when jobStats() is unsupported, and the translation of
    "no domain" / "operation invalid" errors into a COMPLETED JobInfo.
    """

    def setUp(self):
        super(JobInfoTestCase, self).setUp()

        self.useFixture(nova_fixtures.LibvirtFixture())

        self.conn = fakelibvirt.openAuth("qemu:///system",
                                         [[], lambda: True])

        xml = ("<domain type='kvm'>"
               " <name>instance-0000000a</name>"
               "</domain>")
        self.dom = self.conn.createXML(xml, 0)
        self.guest = libvirt_guest.Guest(self.dom)
        # NOTE(review): this mutates class-level state and is not restored
        # on cleanup; the tests below rely on it starting out True.
        libvirt_guest.JobInfo._have_job_stats = True

    @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
    @mock.patch.object(fakelibvirt.virDomain, "jobStats")
    def test_job_stats(self, mock_stats, mock_info):
        """jobStats() fields are copied; unknown keys are ignored."""
        mock_stats.return_value = {
            "type": fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
            "memory_total": 75,
            "memory_processed": 50,
            "memory_remaining": 33,
            "some_new_libvirt_stat_we_dont_know_about": 83
        }

        info = self.guest.get_job_info()

        self.assertIsInstance(info, libvirt_guest.JobInfo)
        self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, info.type)
        self.assertEqual(75, info.memory_total)
        self.assertEqual(50, info.memory_processed)
        self.assertEqual(33, info.memory_remaining)
        # Fields absent from the stats dict default to zero.
        self.assertEqual(0, info.disk_total)
        self.assertEqual(0, info.disk_processed)
        self.assertEqual(0, info.disk_remaining)

        mock_stats.assert_called_once_with()
        self.assertFalse(mock_info.called)

    @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
    @mock.patch.object(fakelibvirt.virDomain, "jobStats")
    def test_job_info_no_support(self, mock_stats, mock_info):
        """VIR_ERR_NO_SUPPORT from jobStats() falls back to jobInfo()."""
        mock_stats.side_effect = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "virDomainGetJobStats not implemented",
            fakelibvirt.VIR_ERR_NO_SUPPORT)

        mock_info.return_value = [
            fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
            100, 99, 10, 11, 12, 75, 50, 33, 1, 2, 3]

        info = self.guest.get_job_info()

        self.assertIsInstance(info, libvirt_guest.JobInfo)
        self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, info.type)
        self.assertEqual(100, info.time_elapsed)
        self.assertEqual(99, info.time_remaining)
        self.assertEqual(10, info.data_total)
        self.assertEqual(11, info.data_processed)
        self.assertEqual(12, info.data_remaining)
        self.assertEqual(75, info.memory_total)
        self.assertEqual(50, info.memory_processed)
        self.assertEqual(33, info.memory_remaining)
        self.assertEqual(1, info.disk_total)
        self.assertEqual(2, info.disk_processed)
        self.assertEqual(3, info.disk_remaining)

        mock_stats.assert_called_once_with()
        mock_info.assert_called_once_with()

    @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
    @mock.patch.object(fakelibvirt.virDomain, "jobStats")
    def test_job_info_attr_error(self, mock_stats, mock_info):
        """AttributeError (API missing entirely) also falls back."""
        mock_stats.side_effect = AttributeError("No such API")

        mock_info.return_value = [
            fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
            100, 99, 10, 11, 12, 75, 50, 33, 1, 2, 3]

        info = self.guest.get_job_info()

        self.assertIsInstance(info, libvirt_guest.JobInfo)
        self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, info.type)
        self.assertEqual(100, info.time_elapsed)
        self.assertEqual(99, info.time_remaining)
        self.assertEqual(10, info.data_total)
        self.assertEqual(11, info.data_processed)
        self.assertEqual(12, info.data_remaining)
        self.assertEqual(75, info.memory_total)
        self.assertEqual(50, info.memory_processed)
        self.assertEqual(33, info.memory_remaining)
        self.assertEqual(1, info.disk_total)
        self.assertEqual(2, info.disk_processed)
        self.assertEqual(3, info.disk_remaining)

        mock_stats.assert_called_once_with()
        mock_info.assert_called_once_with()

    @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
    @mock.patch.object(fakelibvirt.virDomain, "jobStats")
    def test_job_stats_no_domain(self, mock_stats, mock_info):
        """A vanished domain is reported as a zeroed COMPLETED job."""
        mock_stats.side_effect = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "No such domain with UUID blah",
            fakelibvirt.VIR_ERR_NO_DOMAIN)

        info = self.guest.get_job_info()

        self.assertIsInstance(info, libvirt_guest.JobInfo)
        self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type)
        self.assertEqual(0, info.time_elapsed)
        self.assertEqual(0, info.time_remaining)
        self.assertEqual(0, info.memory_total)
        self.assertEqual(0, info.memory_processed)
        self.assertEqual(0, info.memory_remaining)

        mock_stats.assert_called_once_with()
        self.assertFalse(mock_info.called)

    @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
    @mock.patch.object(fakelibvirt.virDomain, "jobStats")
    def test_job_info_no_domain(self, mock_stats, mock_info):
        """NO_DOMAIN from the jobInfo() fallback is handled the same way."""
        mock_stats.side_effect = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "virDomainGetJobStats not implemented",
            fakelibvirt.VIR_ERR_NO_SUPPORT)

        mock_info.side_effect = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "No such domain with UUID blah",
            fakelibvirt.VIR_ERR_NO_DOMAIN)

        info = self.guest.get_job_info()

        self.assertIsInstance(info, libvirt_guest.JobInfo)
        self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type)
        self.assertEqual(0, info.time_elapsed)
        self.assertEqual(0, info.time_remaining)
        self.assertEqual(0, info.memory_total)
        self.assertEqual(0, info.memory_processed)
        self.assertEqual(0, info.memory_remaining)

        mock_stats.assert_called_once_with()
        mock_info.assert_called_once_with()

    @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
    @mock.patch.object(fakelibvirt.virDomain, "jobStats")
    def test_job_stats_operation_invalid(self, mock_stats, mock_info):
        """A stopped domain (OPERATION_INVALID) reads as COMPLETED."""
        mock_stats.side_effect = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "Domain is not running",
            fakelibvirt.VIR_ERR_OPERATION_INVALID)

        info = self.guest.get_job_info()

        self.assertIsInstance(info, libvirt_guest.JobInfo)
        self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type)
        self.assertEqual(0, info.time_elapsed)
        self.assertEqual(0, info.time_remaining)
        self.assertEqual(0, info.memory_total)
        self.assertEqual(0, info.memory_processed)
        self.assertEqual(0, info.memory_remaining)

        mock_stats.assert_called_once_with()
        self.assertFalse(mock_info.called)

    @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
    @mock.patch.object(fakelibvirt.virDomain, "jobStats")
    def test_job_info_operation_invalid(self, mock_stats, mock_info):
        """OPERATION_INVALID from the jobInfo() fallback is handled too."""
        mock_stats.side_effect = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "virDomainGetJobStats not implemented",
            fakelibvirt.VIR_ERR_NO_SUPPORT)

        mock_info.side_effect = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "Domain is not running",
            fakelibvirt.VIR_ERR_OPERATION_INVALID)

        info = self.guest.get_job_info()

        self.assertIsInstance(info, libvirt_guest.JobInfo)
        self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type)
        self.assertEqual(0, info.time_elapsed)
        self.assertEqual(0, info.time_remaining)
        self.assertEqual(0, info.memory_total)
        self.assertEqual(0, info.memory_processed)
        self.assertEqual(0, info.memory_remaining)

        mock_stats.assert_called_once_with()
        mock_info.assert_called_once_with()
|
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import io
import json
import os
import sys
import re
import time
import tempfile
import itertools
import datetime
import pstats
import socket
import struct
import threading
import traceback
import six
from .console import log
from .results import Results, format_benchmark_result
from . import statistics
from . import util
WIN = (os.name == "nt")
# Can't use benchmark.__file__, because that points to the compiled
# file, so it can't be run by another version of Python.
BENCHMARK_RUN_SCRIPT = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "benchmark.py")
JSON_ERROR_RETCODE = -257
BenchmarkResult = util.namedtuple_with_doc(
'BenchmarkResult',
['result', 'samples', 'number', 'errcode', 'stderr', 'profile'],
"""
Postprocessed benchmark result
Attributes
----------
result : list of object
List of numeric values of the benchmarks (one for each parameter
combination).
Values are `None` if benchmark failed or NaN if it was skipped.
samples : list of {list, None}
List of lists of sampled raw data points (or Nones if
no sampling done).
number : list of {dict, None}
List of actual repeat counts for each sample (or Nones if
no sampling done).
errcode : int
Process exit code
stderr : str
Process stdout/stderr output
profile : bytes
If `profile` is `True` and run was at least partially successful,
this key will be a byte string containing the cProfile data.
Otherwise, None.
""")
def skip_benchmarks(benchmarks, env, results=None):
    """
    Mark benchmarks as skipped.

    Parameters
    ----------
    benchmarks : Benchmarks
        Set of benchmarks to skip
    env : Environment
        Environment to skip them in
    results : Results, optional
        Where to store the results.
        If omitted, stored to a new unnamed Results object.

    Returns
    -------
    results : Results
        Benchmark results.
    """
    if results is None:
        results = Results.unnamed()

    log.warning("Skipping {0}".format(env.name))
    with log.indent():
        for bench_name, bench in six.iteritems(benchmarks):
            log.step()
            log.warning('{0} skipped'.format(bench_name))

            # Record each skipped benchmark as a failure placeholder.
            timestamp = datetime.datetime.utcnow()
            skipped_result = fail_benchmark(bench)
            selection = benchmarks.benchmark_selection.get(bench_name)
            results.add_result(bench, skipped_result,
                               selected_idx=selection,
                               started_at=timestamp)

    return results
def run_benchmarks(benchmarks, env, results=None,
                   show_stderr=False, quick=False, profile=False,
                   extra_params=None,
                   record_samples=False, append_samples=False,
                   run_rounds=None,
                   launch_method=None):
    """
    Run all of the benchmarks in the given `Environment`.

    Parameters
    ----------
    benchmarks : Benchmarks
        Benchmarks to run
    env : Environment object
        Environment in which to run the benchmarks.
    results : Results, optional
        Where to store the results.
        If omitted, stored to a new unnamed Results object.
    show_stderr : bool, optional
        When `True`, display any stderr emitted by the benchmark.
    quick : bool, optional
        When `True`, run each benchmark function exactly once.
        This is useful to quickly find errors in the benchmark
        functions, without taking the time necessary to get
        accurate timings.
    profile : bool, optional
        When `True`, run the benchmark through the `cProfile`
        profiler.
    extra_params : dict, optional
        Override values for benchmark attributes.
    record_samples : bool, optional
        Whether to retain result samples or discard them.
    append_samples : bool, optional
        Whether to retain any previously measured result samples
        and use them in statistics computations.
    run_rounds : sequence of int, optional
        Run rounds for benchmarks with multiple rounds.
        If None, run all rounds.
    launch_method : {'auto', 'spawn', 'forkserver'}, optional
        Benchmark launching method to use.

    Returns
    -------
    results : Results
        Benchmark results.
    """
    if extra_params is None:
        extra_params = {}
    else:
        # Copy so that 'quick' overrides below don't leak to the caller.
        extra_params = dict(extra_params)

    if quick:
        extra_params['number'] = 1
        extra_params['repeat'] = 1
        extra_params['warmup_time'] = 0
        extra_params['rounds'] = 1

    if results is None:
        results = Results.unnamed()

    # Find all setup_cache routines needed
    setup_cache_timeout = {}
    benchmark_order = {}
    cache_users = {}
    max_rounds = 0

    def get_rounds(benchmark):
        """Get number of rounds to use for a job"""
        if 'rounds' in extra_params:
            return int(extra_params['rounds'])
        else:
            return int(benchmark.get('rounds', 1))

    # Group benchmarks by their setup_cache key and track, per key, the
    # largest timeout and the set of benchmarks that use the cache.
    for name, benchmark in sorted(six.iteritems(benchmarks)):
        key = benchmark.get('setup_cache_key')
        setup_cache_timeout[key] = max(benchmark.get('setup_cache_timeout',
                                                     benchmark['timeout']),
                                       setup_cache_timeout.get(key, 0))
        benchmark_order.setdefault(key, []).append((name, benchmark))
        max_rounds = max(max_rounds, get_rounds(benchmark))
        cache_users.setdefault(key, set()).add(name)

    if run_rounds is None:
        run_rounds = list(range(1, max_rounds + 1))

    # Interleave benchmark runs, in setup_cache order
    existing_results = results.get_result_keys(benchmarks)

    def iter_run_items():
        # Rounds are iterated in reverse so that round 1 runs last
        # (is_final below).
        for run_round in run_rounds[::-1]:
            for setup_cache_key, benchmark_set in six.iteritems(benchmark_order):
                for name, benchmark in benchmark_set:
                    log.step()

                    rounds = get_rounds(benchmark)

                    if run_round > rounds:
                        if (not append_samples and
                                run_round == run_rounds[-1] and
                                name in existing_results):
                            # We need to remove samples here so that
                            # append_samples=False has an effect on all
                            # benchmarks regardless of whether they were
                            # run this round.
                            selected_idx = benchmarks.benchmark_selection.get(name)
                            results.remove_samples(name, selected_idx)
                        continue

                    is_final = (run_round == 1)
                    yield name, benchmark, setup_cache_key, is_final

    # Run benchmarks in order
    cache_dirs = {None: None}
    failed_benchmarks = set()
    failed_setup_cache = {}

    if append_samples:
        previous_result_keys = existing_results
    else:
        previous_result_keys = set()

    benchmark_durations = {}

    log.info("Benchmarking {0}".format(env.name))

    partial_info_time = None
    # The indent context is entered/exited manually so the try/finally
    # below can close it alongside the spawner.
    indent = log.indent()
    indent.__enter__()

    spawner = get_spawner(env, benchmarks.benchmark_dir,
                          launch_method=launch_method)

    try:
        # Preimport benchmark suite (if using forkserver)
        success, out = spawner.preimport()

        if success:
            if show_stderr and out:
                log.info("Importing benchmark suite produced output:")
                with log.indent():
                    log.error(out.rstrip())
        else:
            # Import failure: mark every benchmark failed and bail out.
            log.warning("Importing benchmark suite failed (skipping all benchmarks).")
            if show_stderr and out:
                with log.indent():
                    log.error(out)

            stderr = 'asv: benchmark suite import failed'
            for name, benchmark, setup_cache_key, is_final in iter_run_items():
                if name in failed_benchmarks:
                    continue
                selected_idx = benchmarks.benchmark_selection.get(name)
                started_at = datetime.datetime.utcnow()
                res = fail_benchmark(benchmark, stderr=stderr)
                results.add_result(benchmark, res,
                                   selected_idx=selected_idx,
                                   started_at=started_at,
                                   record_samples=record_samples)
                failed_benchmarks.add(name)

            return results

        # Run benchmarks
        for name, benchmark, setup_cache_key, is_final in iter_run_items():
            selected_idx = benchmarks.benchmark_selection.get(name)
            started_at = datetime.datetime.utcnow()

            # Don't try to rerun failed benchmarks
            if name in failed_benchmarks:
                if is_final:
                    partial_info_time = None
                    log.info(name, reserve_space=True)
                    log_benchmark_result(results, benchmark,
                                         show_stderr=show_stderr)
                continue

            # Setup cache first, if needed
            if setup_cache_key is None:
                cache_dir = None
            elif setup_cache_key in cache_dirs:
                cache_dir = cache_dirs[setup_cache_key]
            elif setup_cache_key not in failed_setup_cache:
                partial_info_time = None
                log.info("Setting up {0}".format(setup_cache_key), reserve_space=True)
                params_str = json.dumps({'cpu_affinity': extra_params.get('cpu_affinity')})
                cache_dir, stderr = spawner.create_setup_cache(
                    name, setup_cache_timeout[setup_cache_key], params_str)
                if cache_dir is not None:
                    log.add_padded('ok')
                    cache_dirs[setup_cache_key] = cache_dir
                else:
                    log.add_padded('failed')
                    if stderr and show_stderr:
                        with log.indent():
                            log.error(stderr)
                    failed_setup_cache[setup_cache_key] = stderr
                duration = (datetime.datetime.utcnow() - started_at).total_seconds()
                results.set_setup_cache_duration(setup_cache_key, duration)
                # Setup-cache time is accounted separately from the
                # benchmark's own duration.
                started_at = datetime.datetime.utcnow()

            if setup_cache_key in failed_setup_cache:
                # Mark benchmark as failed
                partial_info_time = None
                log.warning('{0} skipped (setup_cache failed)'.format(name))
                stderr = 'asv: setup_cache failed\n\n{}'.format(failed_setup_cache[setup_cache_key])
                res = fail_benchmark(benchmark, stderr=stderr)
                results.add_result(benchmark, res,
                                   selected_idx=selected_idx,
                                   started_at=started_at,
                                   record_samples=record_samples)
                failed_benchmarks.add(name)
                continue

            # If appending to previous results, make sure to use the
            # same value for 'number' attribute.
            cur_extra_params = extra_params
            if name in previous_result_keys:
                cur_extra_params = []
                prev_stats = results.get_result_stats(name, benchmark['params'])
                for s in prev_stats:
                    if s is None or 'number' not in s:
                        p = extra_params
                    else:
                        p = dict(extra_params)
                        p['number'] = s['number']
                    cur_extra_params.append(p)

            # Run benchmark
            if is_final:
                partial_info_time = None
                log.info(name, reserve_space=True)
            elif partial_info_time is None or time.time() > partial_info_time + 30:
                # Emit a progress line at most every 30 seconds for
                # non-final rounds.
                partial_info_time = time.time()
                log.info('Running ({0}--)'.format(name))

            res = run_benchmark(benchmark, spawner,
                                profile=profile,
                                selected_idx=selected_idx,
                                extra_params=cur_extra_params,
                                cwd=cache_dir)

            # Retain runtime durations
            ended_at = datetime.datetime.utcnow()
            if name in benchmark_durations:
                benchmark_durations[name] += (ended_at - started_at).total_seconds()
            else:
                benchmark_durations[name] = (ended_at - started_at).total_seconds()

            # Save result
            results.add_result(benchmark, res,
                               selected_idx=selected_idx,
                               started_at=started_at,
                               duration=benchmark_durations[name],
                               record_samples=(not is_final or record_samples),
                               append_samples=(name in previous_result_keys))

            previous_result_keys.add(name)

            if all(r is None for r in res.result):
                failed_benchmarks.add(name)

            # Log result
            if is_final:
                partial_info_time = None
                log_benchmark_result(results, benchmark,
                                     show_stderr=show_stderr)
            else:
                log.add('.')

            # Cleanup setup cache, if no users left
            if cache_dir is not None and is_final:
                cache_users[setup_cache_key].remove(name)
                if not cache_users[setup_cache_key]:
                    # No users of this cache left, perform cleanup
                    util.long_path_rmtree(cache_dir, True)
                    del cache_dirs[setup_cache_key]
    finally:
        # Cleanup any dangling caches
        for cache_dir in cache_dirs.values():
            if cache_dir is not None:
                util.long_path_rmtree(cache_dir, True)

        indent.__exit__(None, None, None)
        spawner.close()

    return results
def get_spawner(env, benchmark_dir, launch_method):
    """
    Construct a benchmark process spawner for *env*.

    'auto' (or None) resolves to 'forkserver' where fork() and AF_UNIX
    sockets are available — except on OSX, where forking is unsafe with
    many system libraries — otherwise to 'spawn'.

    Raises
    ------
    util.UserError
        If 'forkserver' is requested but unavailable on this platform.
    ValueError
        If *launch_method* is not a recognized value.
    """
    fork_available = hasattr(os, 'fork') and hasattr(socket, 'AF_UNIX')

    if launch_method in (None, 'auto'):
        # Don't use ForkServer as default on OSX, because many Apple
        # things are not fork-safe
        use_forkserver = fork_available and sys.platform not in ('darwin',)
        launch_method = "forkserver" if use_forkserver else "spawn"

    if launch_method == "spawn":
        return Spawner(env, benchmark_dir)

    if launch_method == "forkserver":
        if not fork_available:
            raise util.UserError("'forkserver' launch method not available "
                                 "on this platform")
        return ForkServer(env, benchmark_dir)

    raise ValueError("Invalid launch_method: {}".format(launch_method))
def log_benchmark_result(results, benchmark, show_stderr=False):
    """
    Log a one-line summary for a benchmark result and, when requested,
    dump the captured process output (annotated with the exit status for
    unexpected error codes).
    """
    summary, details = format_benchmark_result(results, benchmark)
    log.add_padded(summary)
    if details:
        log.info(details, color='default')

    # Dump program output
    bench_name = benchmark['name']
    stderr = results.stderr.get(bench_name)
    errcode = results.errcode.get(bench_name)

    benign_codes = (None, 0, util.TIMEOUT_RETCODE, JSON_ERROR_RETCODE)
    if errcode not in benign_codes:
        # Display also error code
        prefix = stderr + "\n" if stderr else ""
        stderr = prefix + "asv: benchmark failed (exit status {})".format(errcode)

    if stderr and show_stderr:
        with log.indent():
            log.error(stderr)
def fail_benchmark(benchmark, stderr='', errcode=1):
    """
    Return a BenchmarkResult describing a failed benchmark.

    For a parameterized benchmark one None entry is produced per
    parameter combination; otherwise a single None entry.
    """
    if benchmark['params']:
        # Mark only selected parameter combinations skipped
        combo_count = sum(1 for _ in itertools.product(*benchmark['params']))
        result = [None] * combo_count
    else:
        result = [None]

    placeholders = [None] * len(result)
    return BenchmarkResult(result=result,
                           samples=list(placeholders),
                           number=list(placeholders),
                           errcode=errcode,
                           stderr=stderr,
                           profile=None)
def run_benchmark(benchmark, spawner, profile,
                  selected_idx=None,
                  extra_params=None,
                  cwd=None,
                  prev_result=None):
    """
    Run a benchmark.

    Runs the benchmark once per parameter combination (or once, if not
    parameterized) and aggregates the per-combination results into a
    single BenchmarkResult.

    Parameters
    ----------
    benchmark : dict
        Benchmark object dict
    spawner : Spawner
        Benchmark process spawner
    profile : bool
        Whether to run with profile
    selected_idx : set, optional
        Set of parameter indices to run for.
    extra_params : {dict, list}, optional
        Additional parameters to pass to the benchmark.
        If a list, each entry should correspond to a benchmark
        parameter combination.
    cwd : str, optional
        Working directory to run the benchmark in.
        If None, run in a temporary directory.
    prev_result : optional
        Not used in this function body; presumably accepted for caller
        compatibility -- TODO confirm.

    Returns
    -------
    result : BenchmarkResult
        Result data.
    """
    if extra_params is None:
        extra_params = {}
    # Accumulators; one entry per parameter combination
    result = []
    samples = []
    number = []
    profiles = []
    stderr = ''
    errcode = 0
    if benchmark['params']:
        param_iter = enumerate(itertools.product(*benchmark['params']))
    else:
        # Non-parameterized benchmarks behave as a single combination
        param_iter = [(0, None)]
    for param_idx, params in param_iter:
        if selected_idx is not None and param_idx not in selected_idx:
            # Combination deselected: record as skipped (NaN result)
            result.append(util.nan)
            samples.append(None)
            number.append(None)
            profiles.append(None)
            continue
        if isinstance(extra_params, list):
            # Per-combination extra parameters
            cur_extra_params = extra_params[param_idx]
        else:
            cur_extra_params = extra_params
        res = _run_benchmark_single_param(
            benchmark, spawner, param_idx,
            extra_params=cur_extra_params, profile=profile,
            cwd=cwd)
        # Each single-param result contributes one slot to each list
        result += res.result
        samples += res.samples
        number += res.number
        profiles.append(res.profile)
        if res.stderr:
            stderr += "\n\n"
            stderr += res.stderr
        if res.errcode != 0:
            # Keep the last nonzero exit code seen
            errcode = res.errcode
    return BenchmarkResult(
        result=result,
        samples=samples,
        number=number,
        errcode=errcode,
        stderr=stderr.strip(),
        profile=_combine_profile_data(profiles)
    )
def _run_benchmark_single_param(benchmark, spawner, param_idx,
                                profile, extra_params, cwd):
    """
    Run a benchmark, for single parameter combination index in case it
    is parameterized

    Parameters
    ----------
    benchmark : dict
        Benchmark object dict
    spawner : Spawner
        Benchmark process spawner
    param_idx : {int, None}
        Parameter index to run benchmark for
    profile : bool
        Whether to run with profile
    extra_params : dict
        Additional parameters to pass to the benchmark
    cwd : {str, None}
        Working directory to run the benchmark in.
        If None, run in a temporary directory.

    Returns
    -------
    result : BenchmarkResult
        Result data (single-slot lists for result/samples/number).
    """
    name = benchmark['name']
    if benchmark['params']:
        # Encode the selected parameter combination into the benchmark id
        name += '-%d' % (param_idx,)
    if profile:
        # Reserve a file for the child process to dump profile data into
        profile_fd, profile_path = tempfile.mkstemp()
        os.close(profile_fd)
    else:
        # The literal string 'None' is what gets passed on the command line
        profile_path = 'None'
    params_str = json.dumps(extra_params)
    if cwd is None:
        real_cwd = tempfile.mkdtemp()
    else:
        real_cwd = cwd
    # The child writes its JSON result into this file
    result_file = tempfile.NamedTemporaryFile(delete=False)
    try:
        result_file.close()
        out, errcode = spawner.run(
            name=name, params_str=params_str, profile_path=profile_path,
            result_file_name=result_file.name,
            timeout=benchmark['timeout'],
            cwd=real_cwd)
        if errcode != 0:
            if errcode == util.TIMEOUT_RETCODE:
                out += "\n\nasv: benchmark timed out (timeout {0}s)\n".format(benchmark['timeout'])
            # Failed run: no usable result data
            result = None
            samples = None
            number = None
        else:
            # Successful run: parse the JSON result written by the child
            with open(result_file.name, 'r') as stream:
                data = stream.read()
            try:
                data = json.loads(data)
            except ValueError as exc:
                data = None
                errcode = JSON_ERROR_RETCODE
                out += "\n\nasv: failed to parse benchmark result: {0}\n".format(exc)
            # Special parsing for timing benchmark results
            if isinstance(data, dict) and 'samples' in data and 'number' in data:
                result = True
                samples = data['samples']
                number = data['number']
            else:
                result = data
                samples = None
                number = None
        if benchmark['params'] and out:
            # Prefix captured output with the parameter combination it
            # belongs to, for readable aggregated logs
            params, = itertools.islice(itertools.product(*benchmark['params']),
                                       param_idx, param_idx + 1)
            out = "For parameters: {0}\n{1}".format(", ".join(params), out)
        if profile:
            with io.open(profile_path, 'rb') as profile_fd:
                profile_data = profile_fd.read()
            # An empty profile file means no data was collected
            profile_data = profile_data if profile_data else None
        else:
            profile_data = None
        return BenchmarkResult(
            result=[result],
            samples=[samples],
            number=[number],
            errcode=errcode,
            stderr=out.strip(),
            profile=profile_data)
    except KeyboardInterrupt:
        # Notify the spawner so it can suppress spurious error reporting
        spawner.interrupt()
        raise util.UserError("Interrupted.")
    finally:
        os.remove(result_file.name)
        if profile:
            os.remove(profile_path)
        if cwd is None:
            # We created the temporary working directory; clean it up
            util.long_path_rmtree(real_cwd, True)
class Spawner(object):
    """
    Manage launching individual benchmark.py commands
    """
    def __init__(self, env, benchmark_dir):
        # env provides .run() (subprocess execution) and .env_vars
        self.env = env
        self.benchmark_dir = os.path.abspath(benchmark_dir)
        self.interrupted = False
    def interrupt(self):
        """Mark this spawner as interrupted (suppresses later error noise)."""
        self.interrupted = True
    def create_setup_cache(self, benchmark_id, timeout, params_str):
        """Run setup_cache for *benchmark_id* in a fresh temp directory.

        Returns (cache_dir, None) on success, or (None, error_output) on
        failure (the temp directory is removed in that case).
        """
        cache_dir = tempfile.mkdtemp()
        # Layer the environment's variables on top of the current ones
        env_vars = dict(os.environ)
        env_vars.update(self.env.env_vars)
        out, _, errcode = self.env.run(
            [BENCHMARK_RUN_SCRIPT, 'setup_cache',
             os.path.abspath(self.benchmark_dir),
             benchmark_id, params_str],
            dots=False, display_error=False,
            return_stderr=True, valid_return_codes=None,
            redirect_stderr=True,
            cwd=cache_dir,
            timeout=timeout,
            env=env_vars)
        if errcode == 0:
            # Caller takes ownership of the cache directory
            return cache_dir, None
        else:
            util.long_path_rmtree(cache_dir, True)
            out += '\nasv: setup_cache failed (exit status {})'.format(errcode)
            return None, out.strip()
    def run(self, name, params_str, profile_path, result_file_name, timeout, cwd):
        """Run a single benchmark in a subprocess; return (output, errcode)."""
        env_vars = dict(os.environ)
        env_vars.update(self.env.env_vars)
        out, _, errcode = self.env.run(
            [BENCHMARK_RUN_SCRIPT, 'run', os.path.abspath(self.benchmark_dir),
             name, params_str, profile_path, result_file_name],
            dots=False, timeout=timeout,
            display_error=False, return_stderr=True, redirect_stderr=True,
            valid_return_codes=None, cwd=cwd,
            env=env_vars)
        return out, errcode
    def preimport(self):
        """No-op hook; subclasses may warm up imports. Returns (ok, output)."""
        return True, ""
    def close(self):
        """Release resources (nothing to do for the plain spawner)."""
        pass
class ForkServer(Spawner):
    """Spawner that keeps a long-lived server process in the benchmark
    environment and has it fork workers, avoiding repeated interpreter
    startup.  POSIX-only (requires os.fork and unix domain sockets)."""
    def __init__(self, env, root):
        super(ForkServer, self).__init__(env, root)
        if not (hasattr(os, 'fork') and hasattr(os, 'setpgid')):
            raise RuntimeError("ForkServer only available on POSIX")
        # Unix socket through which commands are sent to the server
        self.tmp_dir = tempfile.mkdtemp(prefix='asv-forkserver-')
        self.socket_name = os.path.join(self.tmp_dir, 'socket')
        env_vars = dict(os.environ)
        env_vars.update(env.env_vars)
        self.server_proc = env.run(
            [BENCHMARK_RUN_SCRIPT, 'run_server', self.benchmark_dir, self.socket_name],
            return_popen=True,
            redirect_stderr=True,
            env=env_vars)
        # Drain server stdout on a thread so its pipe never fills up
        self._server_output = None
        self.stdout_reader_thread = threading.Thread(target=self._stdout_reader)
        self.stdout_reader_thread.start()
        # Wait for the socket to appear
        while self.stdout_reader_thread.is_alive():
            if os.path.exists(self.socket_name):
                break
            time.sleep(0.05)
        if not os.path.exists(self.socket_name):
            os.rmdir(self.tmp_dir)
            raise RuntimeError("Failed to start server thread")
    def _stdout_reader(self):
        """Read all server stdout; stash it (or a traceback) for close()."""
        try:
            out = self.server_proc.stdout.read()
            self.server_proc.stdout.close()
            out = out.decode('utf-8', 'replace')
        except Exception as exc:
            import traceback
            out = traceback.format_exc()
        self._server_output = out
    def run(self, name, params_str, profile_path, result_file_name, timeout, cwd):
        """Ask the fork server to run one benchmark; return (out, errcode)."""
        msg = {'action': 'run',
               'benchmark_id': name,
               'params_str': params_str,
               'profile_path': profile_path,
               'result_file': result_file_name,
               'timeout': timeout,
               'cwd': cwd}
        result = self._send_command(msg)
        return result['out'], result['errcode']
    def preimport(self):
        """Ask the server to import benchmark modules up front.

        Returns (success, output).  NOTE(review): on success, *out* is
        the raw decoded reply from _send_command rather than a string --
        confirm against the server-side protocol.
        """
        success = True
        out = ""
        try:
            out = self._send_command({'action': 'preimport'})
        except Exception as exc:
            success = False
            out = "asv: benchmark runner crashed\n"
            if isinstance(exc, util.UserError):
                out += str(exc)
            else:
                # NOTE(review): relies on a module-level `traceback`
                # import not visible in this chunk -- confirm it exists.
                out += traceback.format_exc()
            out = out.rstrip()
        return success, out
    def _send_command(self, msg):
        """Send one JSON command to the server; return its decoded reply.

        Wire protocol: 8-byte little-endian length prefix followed by the
        JSON payload, in both directions.
        """
        msg = json.dumps(msg)
        if sys.version_info[0] >= 3:
            msg = msg.encode('utf-8')
        # Connect (with wait+retry)
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        for retry in range(5, 0, -1):
            try:
                s.connect(self.socket_name)
                break
            except socket.error:
                if retry > 1:
                    time.sleep(0.2)
                else:
                    raise
        # Send command
        try:
            s.sendall(struct.pack('<Q', len(msg)))
            s.sendall(msg)
            # Read result
            read_size, = struct.unpack('<Q', util.recvall(s, 8))
            result_text = util.recvall(s, read_size)
            if sys.version_info[0] >= 3:
                result_text = result_text.decode('utf-8')
            result = json.loads(result_text)
        except Exception:
            # If the server died, surface its exit code instead
            exitcode = self.server_proc.poll()
            if exitcode is not None:
                raise util.UserError("Process exited with code {0}".format(exitcode))
            raise
        finally:
            s.close()
        return result
    def close(self):
        """Shut the server down (SIGINT first, then SIGKILL) and clean up."""
        import signal
        # Check for termination
        if self.server_proc.poll() is None:
            util._killpg_safe(self.server_proc.pid, signal.SIGINT)
            if self.server_proc.poll() is None:
                time.sleep(0.1)
        if self.server_proc.poll() is None:
            # Kill process group
            util._killpg_safe(self.server_proc.pid, signal.SIGKILL)
        self.server_proc.wait()
        self.stdout_reader_thread.join()
        if self._server_output and not self.interrupted:
            # Surface whatever the server printed, unless we interrupted it
            with log.indent():
                log.error("asv: forkserver:")
                log.error(self._server_output)
        util.long_path_rmtree(self.tmp_dir)
def _combine_profile_data(datasets):
"""
Combine a list of profile data to a single profile
"""
datasets = [data for data in datasets if data is not None]
if not datasets:
return None
elif len(datasets) == 1:
return datasets[0]
# Load and combine stats
stats = None
while datasets:
data = datasets.pop(0)
f = tempfile.NamedTemporaryFile(delete=False)
try:
f.write(data)
f.close()
if stats is None:
stats = pstats.Stats(f.name)
else:
stats.add(f.name)
finally:
os.remove(f.name)
# Write combined stats out
f = tempfile.NamedTemporaryFile(delete=False)
try:
f.close()
stats.dump_stats(f.name)
with open(f.name, 'rb') as fp:
return fp.read()
finally:
os.remove(f.name)
|
|
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.utils import timezone
from olaf.models import *
from olaf.forms import *
from olaf.utility import usertools
from olaf.chess.controller import proccess_move
def index(request):
    """Main page: game dashboard when logged in, login/register otherwise."""
    args = {}
    message = request.session.pop('message', default=None)
    if message is not None:
        args['message'] = message
    if request.user.is_authenticated:
        if request.method == 'POST':
            game_id = request.POST.get('game_id')
            if game_id is not None:
                if game_id == '-1':
                    # Sentinel: start a brand new game
                    game_id = usertools.new_game(request)
                request.session['game_id'] = game_id
            else:
                request.session.pop('game_id', default=None)
        fmt_time = lambda t: str(t.date()) + " - " + str(t.hour) + ":" + str(t.minute) + ":" + str(t.second)
        args['game_list'] = [
            [str(game.id), fmt_time(game.creation_time)]
            for game in request.user.userdata.game_history.filter(result=0).order_by('-creation_time')
        ]
        if request.session.get('game_id') is not None:
            args['game_board'] = usertools.get_translated_game_board(request)
        else:
            args['game_board'] = None
        return render(request, 'olaf/index_logged_in.html', args)
    else:
        args['login_form'] = LoginForm()
        args['register_form'] = RegisterForm()
        args['score'] = [
            [user.master.username, user.wins, user.loses, user.ties]
            for user in UserData.objects.filter(is_active=True)
        ]
        return render(request, 'olaf/index_not_logged_in.html', args)
# Dispatch table for form_operation(): maps an operation name to
#   (handler function, form class, failure template, failure-template args,
#    success URL name, session entries to set on success)
form_operation_dict = {
	'login' : (
		usertools.login_user,
		LoginForm,
		'olaf/login.html',
		{},
		'index',
		{ 'message' : "You're logged in. :)"}
	),
	'register' : (
		usertools.register_user,
		RegisterForm,
		'olaf/register.html',
		{},
		'index',
		{ 'message' : "An activation email has been sent to you" }
	),
	'password_reset_request' : (
		usertools.init_pass_reset_token,
		ForgotPasswordUsernameOrEmailForm,
		'olaf/password_reset_request.html',
		{},
		'index',
		{ 'message' : "An email containing the password reset link will be sent to your email"}
	),
	'reset_password' : (
		usertools.reset_password_action,
		PasswordChangeForm,
		'olaf/reset_password.html',
		{},
		'olaf:login',
		{ 'message' : "Password successfully changed, you can login now" }
	),
	'resend_activation_email' : (
		usertools.resend_activation_email,
		ResendActivationUsernameOrEmailForm,
		'olaf/resend_activation_email.html',
		{},
		'index',
		{ 'message' : "Activation email successfully sent to your email" }
	),
}
def form_operation(request, oper, *args):
    """Generic form handler driven by the form_operation_dict table.

    On a valid POST, runs the handler, stores the success messages in the
    session, and redirects; otherwise re-renders the failure template
    (with the bound form when validation failed).
    """
    func, FORM, fail_template, fail_args, success_url, success_args = \
        form_operation_dict[oper]
    if request.method == 'POST':
        form = FORM(request.POST)
        if form.is_valid():
            func(request, form, *args)
            # Stash success messages for the next view to pick up
            for key, value in success_args.items():
                request.session[key] = value
            return HttpResponseRedirect(reverse(success_url))
    else:
        form = FORM()
    message = request.session.pop('message', default=None)
    if message is not None:
        fail_args['message'] = message
    fail_args['form'] = form
    return render(request, fail_template, fail_args)
#view functions
def login_user(request):
    """Show/process the login form; authenticated users are sent home."""
    if request.user.is_authenticated:
        return HttpResponseRedirect(reverse('index'))
    return form_operation(request, 'login')
def register_user(request):
    """Show/process the registration form; authenticated users go home."""
    if request.user.is_authenticated:
        return HttpResponseRedirect(reverse('index'))
    return form_operation(request, 'register')
def password_reset_request(request):
    """Request a password-reset email; authenticated users go home."""
    if request.user.is_authenticated:
        return HttpResponseRedirect(reverse('index'))
    return form_operation(request, 'password_reset_request')
def reset_password_action(request, token):
    """Handle a password-reset link identified by *token*."""
    if request.user.is_authenticated:
        return HttpResponseRedirect(reverse('index'))
    tk = ExpirableTokenField.objects.filter(token=token).first()
    if tk is None:
        request.session['message'] = "Broken link"
        return HttpResponseRedirect(reverse('index'))
    if timezone.now() <= tk.expiration_time:
        return form_operation(request, 'reset_password', token)
    request.session['message'] = "Link expired, try getting a new one"
    return HttpResponseRedirect(reverse('olaf:reset_password'))
def activate_account(request, token):
    """Activate the account tied to *token*, handling expiry and re-use."""
    if request.user.is_authenticated:
        return HttpResponseRedirect(reverse('index'))
    tk = ExpirableTokenField.objects.filter(token=token).first()
    if tk is None:
        request.session['message'] = "Broken link"
        return HttpResponseRedirect(reverse('index'))
    if timezone.now() > tk.expiration_time:
        request.session['message'] = "Link expired, try getting a new one"
        return HttpResponseRedirect(reverse('olaf:resend_activation_email'))
    if tk.user.is_active:
        request.session['message'] = "Account already active"
        return HttpResponseRedirect(reverse('index'))
    userdata = tk.user
    userdata.is_active = True
    userdata.save()
    request.session['message'] = "Your account has been activated successfully"
    return HttpResponseRedirect(reverse('olaf:login'))
def resend_activation_email(request):
    """Resend the activation email; authenticated users go home."""
    if request.user.is_authenticated:
        return HttpResponseRedirect(reverse('index'))
    return form_operation(request, 'resend_activation_email')
def logout_user(request):
    """Log the user out and redirect home with a goodbye message."""
    usertools.logout_user(request)
    request.session['message'] = "Goodbye :)"
    return HttpResponseRedirect(reverse('index'))
def scoreboard(request):
    """Show the score table, or redirect to a searched user's profile."""
    if request.method == 'POST':
        username = request.POST.get('username')
        user = User.objects.filter(username=username).first()
        if user is None:
            request.session['message'] = "User not found"
            return HttpResponseRedirect(reverse('olaf:scoreboard'))
        return HttpResponseRedirect(
            reverse('olaf:user_profile', args=(username,)))
    args = {}
    message = request.session.pop('message', default=None)
    if message is not None:
        args['message'] = message
    args['lst'] = [
        (user.master.username, user.wins, user.loses, user.ties)
        for user in UserData.objects.filter(is_active=True)
    ]
    if request.user.is_authenticated:
        args['logged_in'] = True
    return render(request, 'olaf/scoreboard.html', args)
def move(request):
    """Apply a chess move from the request, then return to the main page."""
    proccess_move(request)
    return HttpResponseRedirect(reverse('index'))
|
|
# Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
from oslo_policy import policy as oslo_policy
import six
from webob import exc
from nova.api.openstack.compute import instance_actions as instance_actions_v21
from nova.api.openstack.compute.legacy_v2.contrib import instance_actions \
as instance_actions_v2
from nova.compute import api as compute_api
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import objects
from nova import policy
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_server_actions
# Shared instance-UUID / request-id fixtures from fake_server_actions
FAKE_UUID = fake_server_actions.FAKE_UUID
FAKE_REQUEST_ID = fake_server_actions.FAKE_REQUEST_ID1
def format_action(action):
    '''Remove keys that aren't serialized.'''
    for key in ('id', 'finish_time', 'created_at', 'updated_at',
                'deleted_at', 'deleted'):
        if key in action:
            del action[key]
    if 'start_time' in action:
        # NOTE(danms): Without WSGI above us, these will be just stringified
        action['start_time'] = str(action['start_time'].replace(tzinfo=None))
    for event in action.get('events', []):
        format_event(event)
    return action
def format_event(event):
    '''Remove keys that aren't serialized.'''
    for key in ('id', 'created_at', 'updated_at', 'deleted_at', 'deleted',
                'action_id'):
        if key in event:
            del event[key]
    # NOTE(danms): Without WSGI above us, these will be just stringified
    for ts_key in ('start_time', 'finish_time'):
        if ts_key in event:
            event[ts_key] = str(event[ts_key].replace(tzinfo=None))
    return event
class InstanceActionsPolicyTestV21(test.NoDBTestCase):
    """Verify os-instance-actions policy restricts access by project (v2.1)."""
    instance_actions = instance_actions_v21
    def setUp(self):
        super(InstanceActionsPolicyTestV21, self).setUp()
        self.controller = self.instance_actions.InstanceActionsController()
    def _get_http_req(self, action):
        """Build a fake HTTP request for the given server sub-resource."""
        fake_url = '/123/servers/12/%s' % action
        return fakes.HTTPRequest.blank(fake_url)
    def _set_policy_rules(self):
        """Restrict instance-actions access to the instance's own project."""
        rules = {'compute:get': '',
                 'os_compute_api:os-instance-actions':
                 'project_id:%(project_id)s'}
        policy.set_rules(oslo_policy.Rules.from_dict(rules))
    def test_list_actions_restricted_by_project(self):
        # index() must raise Forbidden for an instance in another project
        self._set_policy_rules()
        def fake_instance_get_by_uuid(context, instance_id,
                                      columns_to_join=None,
                                      use_slave=False):
            # Instance owned by a project different from the requester's
            return fake_instance.fake_db_instance(
                **{'name': 'fake', 'project_id': '%s_unequal' %
                context.project_id})
        self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
        req = self._get_http_req('os-instance-actions')
        self.assertRaises(exception.Forbidden, self.controller.index, req,
                          str(uuid.uuid4()))
    def test_get_action_restricted_by_project(self):
        # show() must raise Forbidden for an instance in another project
        self._set_policy_rules()
        def fake_instance_get_by_uuid(context, instance_id,
                                      columns_to_join=None,
                                      use_slave=False):
            return fake_instance.fake_db_instance(
                **{'name': 'fake', 'project_id': '%s_unequal' %
                context.project_id})
        self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
        req = self._get_http_req('os-instance-actions/1')
        self.assertRaises(exception.Forbidden, self.controller.show, req,
                          str(uuid.uuid4()), '1')
class InstanceActionsPolicyTestV2(InstanceActionsPolicyTestV21):
    """Run the same policy checks against the legacy v2 extension."""
    instance_actions = instance_actions_v2
    def _set_policy_rules(self):
        """Legacy v2 policy keys for the same project restriction."""
        rules = {'compute:get': '',
                 'compute_extension:instance_actions':
                 'project_id:%(project_id)s'}
        policy.set_rules(oslo_policy.Rules.from_dict(rules))
class InstanceActionsTestV21(test.NoDBTestCase):
    """Functional tests for the os-instance-actions controller (v2.1)."""
    instance_actions = instance_actions_v21
    def setUp(self):
        super(InstanceActionsTestV21, self).setUp()
        self.controller = self.instance_actions.InstanceActionsController()
        # Deep-copy the shared fixtures so tests can mutate them freely
        self.fake_actions = copy.deepcopy(fake_server_actions.FAKE_ACTIONS)
        self.fake_events = copy.deepcopy(fake_server_actions.FAKE_EVENTS)
        def fake_get(self, context, instance_uuid, expected_attrs=None,
                     want_objects=False):
            return objects.Instance(uuid=instance_uuid)
        def fake_instance_get_by_uuid(context, instance_id, use_slave=False):
            return fake_instance.fake_instance_obj(None,
                **{'name': 'fake', 'project_id': context.project_id})
        self.stubs.Set(compute_api.API, 'get', fake_get)
        self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
    def _get_http_req(self, action, use_admin_context=False):
        """Build a fake HTTP request, optionally with an admin context."""
        fake_url = '/123/servers/12/%s' % action
        return fakes.HTTPRequest.blank(fake_url,
                                       use_admin_context=use_admin_context)
    def _set_policy_rules(self):
        """Allow action access to all, but restrict events to admins."""
        rules = {'compute:get': '',
                 'os_compute_api:os-instance-actions': '',
                 'os_compute_api:os-instance-actions:events': 'is_admin:True'}
        policy.set_rules(oslo_policy.Rules.from_dict(rules))
    def test_list_actions(self):
        # index() should return every action known for the instance
        def fake_get_actions(context, uuid):
            actions = []
            for act in six.itervalues(self.fake_actions[uuid]):
                action = models.InstanceAction()
                action.update(act)
                actions.append(action)
            return actions
        self.stubs.Set(db, 'actions_get', fake_get_actions)
        req = self._get_http_req('os-instance-actions')
        res_dict = self.controller.index(req, FAKE_UUID)
        for res in res_dict['instanceActions']:
            fake_action = self.fake_actions[FAKE_UUID][res['request_id']]
            self.assertEqual(format_action(fake_action), format_action(res))
    def test_get_action_with_events_allowed(self):
        # Admin context: show() should include the action's events
        def fake_get_action(context, uuid, request_id):
            action = models.InstanceAction()
            action.update(self.fake_actions[uuid][request_id])
            return action
        def fake_get_events(context, action_id):
            events = []
            for evt in self.fake_events[action_id]:
                event = models.InstanceActionEvent()
                event.update(evt)
                events.append(event)
            return events
        self.stubs.Set(db, 'action_get_by_request_id', fake_get_action)
        self.stubs.Set(db, 'action_events_get', fake_get_events)
        req = self._get_http_req('os-instance-actions/1',
                                 use_admin_context=True)
        res_dict = self.controller.show(req, FAKE_UUID, FAKE_REQUEST_ID)
        fake_action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
        fake_events = self.fake_events[fake_action['id']]
        fake_action['events'] = fake_events
        self.assertEqual(format_action(fake_action),
                         format_action(res_dict['instanceAction']))
    def test_get_action_with_events_not_allowed(self):
        # Non-admin context: show() must omit the events
        def fake_get_action(context, uuid, request_id):
            return self.fake_actions[uuid][request_id]
        def fake_get_events(context, action_id):
            return self.fake_events[action_id]
        self.stubs.Set(db, 'action_get_by_request_id', fake_get_action)
        self.stubs.Set(db, 'action_events_get', fake_get_events)
        self._set_policy_rules()
        req = self._get_http_req('os-instance-actions/1')
        res_dict = self.controller.show(req, FAKE_UUID, FAKE_REQUEST_ID)
        fake_action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
        self.assertEqual(format_action(fake_action),
                         format_action(res_dict['instanceAction']))
    def test_action_not_found(self):
        # Unknown request id -> 404
        def fake_no_action(context, uuid, action_id):
            return None
        self.stubs.Set(db, 'action_get_by_request_id', fake_no_action)
        req = self._get_http_req('os-instance-actions/1')
        self.assertRaises(exc.HTTPNotFound, self.controller.show, req,
                          FAKE_UUID, FAKE_REQUEST_ID)
    def test_index_instance_not_found(self):
        # Missing instance -> 404 from index()
        def fake_get(self, context, instance_uuid, expected_attrs=None,
                     want_objects=False):
            raise exception.InstanceNotFound(instance_id=instance_uuid)
        self.stubs.Set(compute_api.API, 'get', fake_get)
        req = self._get_http_req('os-instance-actions')
        self.assertRaises(exc.HTTPNotFound, self.controller.index, req,
                          FAKE_UUID)
    def test_show_instance_not_found(self):
        # Missing instance -> 404 from show()
        def fake_get(self, context, instance_uuid, expected_attrs=None,
                     want_objects=False):
            raise exception.InstanceNotFound(instance_id=instance_uuid)
        self.stubs.Set(compute_api.API, 'get', fake_get)
        req = self._get_http_req('os-instance-actions/fake')
        self.assertRaises(exc.HTTPNotFound, self.controller.show, req,
                          FAKE_UUID, 'fake')
class InstanceActionsTestV2(InstanceActionsTestV21):
    """Run the same functional tests against the legacy v2 extension."""
    instance_actions = instance_actions_v2
    def _set_policy_rules(self):
        """Legacy v2 policy keys (events restricted to admins)."""
        rules = {'compute:get': '',
                 'compute_extension:instance_actions': '',
                 'compute_extension:instance_actions:events': 'is_admin:True'}
        policy.set_rules(oslo_policy.Rules.from_dict(rules))
|
|
"""n-body simulator to derive TDV+TTV diagrams of planet-moon configurations.
Credit for part of the source is given to
https://github.com/akuchling/50-examples/blob/master/gravity.rst
Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License
"""
import numpy
import math
import matplotlib.pylab as plt
from modified_turtle import Turtle
from phys_const import *
class Body(Turtle):
    """Subclass of Turtle representing a gravitationally-acting body"""
    name = 'Body'
    vx = vy = 0.0  # velocities in m/s
    px = py = 0.0  # positions in m

    def attraction(self, other):
        """(Body): (fx, fy) Returns the force exerted upon this body by the other body"""
        # Separation vector from this body to the other
        dx = other.px - self.px
        dy = other.py - self.py
        d = math.sqrt(dx ** 2 + dy ** 2)
        # Newtonian gravity magnitude, decomposed along the separation angle
        f = G * self.mass * other.mass / (d ** 2)
        theta = math.atan2(dy, dx)
        fx = math.cos(theta) * f
        fy = math.sin(theta) * f
        return fx, fy
def loop(bodies, orbit_duration):
    """([Body]) Loops and updates the positions of all the provided bodies"""
    # Calculate the duration of our simulation: One full orbit of the outer moon
    seconds_per_day = 24*60*60
    timesteps_per_day = 1000
    timestep = seconds_per_day / timesteps_per_day
    total_steps = int(orbit_duration / 3600 / 24 * timesteps_per_day)
    #print total_steps, orbit_duration / 24 / 60 / 60
    for body in bodies:
        body.penup()
        body.hideturtle()
    for step in range(total_steps):
        for body in bodies:
            if body.name == 'planet':
                # Add current position and velocity to our list
                # NOTE(review): appends to the module-level tdv_list /
                # ttv_list globals rather than returning data -- confirm
                # callers reset these lists between runs.
                tdv_list.append(body.vx)
                ttv_list.append(body.px)
        force = {}
        for body in bodies:
            # Add up all of the forces exerted on 'body'
            total_fx = total_fy = 0.0
            for other in bodies:
                # Don't calculate the body's attraction to itself
                if body is other:
                    continue
                fx, fy = body.attraction(other)
                total_fx += fx
                total_fy += fy
            # Record the total force exerted
            force[body] = (total_fx, total_fy)
        # Update velocities based upon on the force
        for body in bodies:
            fx, fy = force[body]
            body.vx += fx / body.mass * timestep
            body.vy += fy / body.mass * timestep
            # Update positions
            body.px += body.vx * timestep
            body.py += body.vy * timestep
            #body.goto(body.px*SCALE, body.py*SCALE)
            #body.dot(3)
def run_sim(R_star, transit_duration, bodies):
    """Run 3-body sim and convert results to TTV + TDV values in [minutes]"""
    # Run 3-body sim for one full orbit of the outermost moon
    # NOTE(review): orbit_duration, planet, gravity_firstmoon/secondmoon
    # and the ttv/tdv result lists are module-level globals set up by the
    # main routine -- this function is not self-contained.
    loop(bodies, orbit_duration)
    # Move resulting data from lists to numpy arrays
    # NOTE(review): the empty arrays created here are immediately rebound
    # to the plain lists; the numpy functions below accept lists anyway,
    # so these two numpy.array([]) assignments look like dead code -- confirm.
    ttv_array = numpy.array([])
    ttv_array = ttv_list
    tdv_array = numpy.array([])
    tdv_array = tdv_list
    # Zeropoint correction
    middle_point =  numpy.amin(ttv_array) + numpy.amax(ttv_array)
    ttv_array = numpy.subtract(ttv_array, 0.5 * middle_point)
    ttv_array = numpy.divide(ttv_array, 1000)  # km/sec
    # Compensate for barycenter offset of planet at start of simulation:
    # NOTE(review): mutates the global planet object's position.
    planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
    stretch_factor = 1 / ((planet.px / 1000) / numpy.amax(ttv_array))
    ttv_array = numpy.divide(ttv_array, stretch_factor)
    # Convert to time units, TTV
    ttv_array = numpy.divide(ttv_array, R_star)
    ttv_array = numpy.multiply(ttv_array, transit_duration * 60 * 24)  # minutes
    # Convert to time units, TDV
    oldspeed = (2 * R_star / transit_duration) * 1000 / 24 / 60 / 60  # m/sec
    newspeed = oldspeed - numpy.amax(tdv_array)
    difference = (transit_duration - (transit_duration * newspeed / oldspeed)) * 24 * 60
    conversion_factor = difference / numpy.amax(tdv_array)
    tdv_array = numpy.multiply(tdv_array, conversion_factor)
    return ttv_array, tdv_array
"""Main routine"""
# Set variables and constants. Do not change these!
G = 6.67428e-11 # Gravitational constant G
SCALE = 5e-07 # [px/m] Only needed for plotting during nbody-sim
tdv_list = []
ttv_list = []
R_star = 6.96 * 10**5 # [km], solar radius
transit_duration = (2*pi/sqrt(G*(M_sun+M_jup)/a_jup**3)*R_sun/(pi*a_jup)*sqrt((1+R_jup/R_sun)**2))/60/60/24 # transit duration without a moon, Eq. (C1) Kipping (2009b, MNRAS), for q = 0
print transit_duration
planet = Body()
planet.name = 'planet'
planet.mass = M_jup
#semimajor_axis = 1. * AU #[m]
semimajor_axis = a_jup
stellar_mass = M_sun
radius_hill = semimajor_axis * (planet.mass / (3 * (stellar_mass))) ** (1./3)
# Define parameters
firstmoon = Body()
firstmoon.mass = M_gan
firstmoon.px = 0.4218 * 10**9
secondmoon = Body()
secondmoon.mass = M_gan
secondmoon.px = 0.59293316 * 10**9
thirdmoon = Body()
thirdmoon.mass = M_gan
thirdmoon.px = 1.23335068 * 10**9
# Calculate start velocities
firstmoon.vy = math.sqrt(G * planet.mass * (2 / firstmoon.px - 1 / firstmoon.px))
secondmoon.vy = math.sqrt(G * planet.mass * (2 / secondmoon.px - 1 / secondmoon.px))
thirdmoon.vy = math.sqrt(G * planet.mass * (2 / thirdmoon.px - 1 / thirdmoon.px))
planet.vy = (-secondmoon.vy * secondmoon.mass - firstmoon.vy * firstmoon.mass) / planet.mass
# Calculate planet displacement. This holds for circular orbits
gravity_firstmoon = (firstmoon.mass / planet.mass) * firstmoon.px
gravity_secondmoon = (secondmoon.mass / planet.mass) * secondmoon.px
gravity_thirdmoon = (thirdmoon.mass / planet.mass) * thirdmoon.px
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon + gravity_thirdmoon)
# Use the outermost moon to calculate the length of one full orbit duration
orbit_duration = math.sqrt((4 * math.pi**2 *thirdmoon.px ** 3) / (G * (thirdmoon.mass + planet.mass)))
orbit_duration = orbit_duration * 1.005
# Run simulation. Make sure to add/remove the moons you want to simulate!
ttv_array, tdv_array = run_sim(
R_star,
transit_duration,
[planet, firstmoon, secondmoon, thirdmoon])
# Output information
print 'TTV amplitude =', numpy.amax(ttv_array), \
'[min] = ', numpy.amax(ttv_array) * 60, '[sec]'
print 'TDV amplitude =', numpy.amax(tdv_array), \
'[min] = ', numpy.amax(tdv_array) * 60, '[sec]'
ax = plt.axes()
plt.plot(ttv_array, tdv_array, color = 'k')
plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
plt.rc('text', usetex=True)
plt.tick_params(axis='both', which='major', labelsize = 16)
plt.xlabel('transit timing variation [minutes]', fontsize = 16)
plt.ylabel('transit duration variation [minutes]', fontsize = 16)
ax.tick_params(direction='out')
plt.ylim([numpy.amin(tdv_array) * 1.2, numpy.amax(tdv_array) * 1.2])
plt.xlim([numpy.amin(ttv_array) * 1.2, numpy.amax(ttv_array) * 1.2])
plt.plot((0, 0), (numpy.amax(tdv_array) * 10., numpy.amin(tdv_array) * 10.), 'k', linewidth=0.5)
plt.plot((numpy.amin(ttv_array) * 10., numpy.amax(ttv_array) * 10.), (0, 0), 'k', linewidth=0.5)
# Fix axes for comparison with eccentric moon
plt.xlim(-0.15, +0.15)
plt.ylim(-0.65, +0.65)
plt.annotate(r"5:3:1", xy=(-0.145, +0.55), size=16)
plt.savefig("fig_system_12.eps", bbox_inches = 'tight')
|
|
# -*- coding: utf-8 -*-
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2019 - Frank Scholz <coherence@beebits.net>
import os
import pygtk
pygtk.require("2.0")
import gtk
if __name__ == '__main__':
from twisted.internet import gtk2reactor
gtk2reactor.install()
from twisted.internet import reactor
from twisted.internet import task,defer
from coherence import log
from coherence.upnp.core.utils import parse_xml, getPage, means_true, generalise_boolean
from pkg_resources import resource_filename
class IGDWidget(log.Loggable):
    """GTK status window for a UPnP InternetGatewayDevice (IGD).

    Shows link state, link type, uptime, external IP, traffic counters
    and the NAT port-mapping table.  The widget keeps itself current by
    subscribing to UPnP state-variable events and by polling the device
    with two LoopingCalls (traffic and connection state).
    """
    # category used by coherence's Loggable machinery
    logCategory = 'igd'
    def __init__(self,coherence,device):
        """Build the window and wire it up to *device*'s WAN services.

        :param coherence: the Coherence instance (stored, not used here;
            may be None when run standalone).
        :param device: UPnP device object wrapping the gateway.
        """
        self.coherence = coherence
        self.device = device
        self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        # closing the window only hides it -- see hide()
        self.window.connect("delete_event", self.hide)
        self.window.set_default_size(480,200)
        try:
            title = 'InternetGatewayDevice %s' % device.get_friendly_name()
        except:
            # device may be None or lack a friendly name
            title = 'InternetGatewayDevice'
        self.window.set_title(title)
        vbox = gtk.VBox(homogeneous=False, spacing=10)
        # --- link-state row: red/green icon plus link-type label ---
        hbox = gtk.HBox(homogeneous=False, spacing=10)
        text = gtk.Label("<b>Link:</b>")
        text.set_use_markup(True)
        self.link_state_image = gtk.Image()
        icon = resource_filename(__name__, os.path.join('icons','red.png'))
        self.link_down_icon = gtk.gdk.pixbuf_new_from_file(icon)
        icon = resource_filename(__name__, os.path.join('icons','green.png'))
        self.link_up_icon = gtk.gdk.pixbuf_new_from_file(icon)
        # assume link is down until an event tells us otherwise
        self.link_state_image.set_from_pixbuf(self.link_down_icon)
        hbox.add(text)
        hbox.add(self.link_state_image)
        self.link_type = gtk.Label("<b>Type:</b> unknown (n/a)")
        self.link_type.set_use_markup(True)
        hbox.add(self.link_type)
        vbox.pack_start(hbox,False,False,2)
        # --- statistics row: uptime, external IP and traffic counters ---
        hbox = gtk.HBox(homogeneous=False, spacing=10)
        label = gtk.Label("<b>Uptime:</b>")
        label.set_use_markup(True)
        hbox.add(label)
        self.uptime = gtk.Label(" ")
        self.uptime.set_use_markup(True)
        hbox.add(self.uptime)
        label = gtk.Label("<b>External IP:</b>")
        label.set_use_markup(True)
        hbox.add(label)
        self.external_ip = gtk.Label(" ")
        self.external_ip.set_use_markup(True)
        hbox.add(self.external_ip)
        label = gtk.Label("<b>IN-Bytes:</b>")
        label.set_use_markup(True)
        hbox.add(label)
        self.bytes_in = gtk.Label(" ")
        self.bytes_in.set_use_markup(True)
        hbox.add(self.bytes_in)
        label = gtk.Label("<b>OUT-Bytes:</b>")
        label.set_use_markup(True)
        hbox.add(label)
        self.bytes_out = gtk.Label(" ")
        self.bytes_out.set_use_markup(True)
        hbox.add(self.bytes_out)
        vbox.pack_start(hbox,False,False,2)
        # --- port-mapping table ---
        hbox = gtk.HBox(homogeneous=False, spacing=10)
        label = gtk.Label("<b>Port-Mappings:</b>")
        label.set_use_markup(True)
        hbox.add(label)
        vbox.pack_start(hbox,False,False,2)
        # one string column per port-mapping attribute (see loop below)
        self.nat_store = gtk.ListStore(str,str,str,str,str,str,str,str,str)
        self.nat_view = gtk.TreeView(self.nat_store)
        # right-click context menu for add/modify/delete
        self.nat_view.connect("button_press_event", self.button_action)
        i = 0
        for c in ['index','enabled','protocol','remote host','external port','internal host','internal port','lease duration','description']:
            column = gtk.TreeViewColumn(c)
            self.nat_view.append_column(column)
            text_cell = gtk.CellRendererText()
            column.pack_start(text_cell, True)
            column.add_attribute(text_cell, "text", i)
            i += 1
        vbox.pack_start(self.nat_view,expand=True,fill=True)
        self.window.add(vbox)
        self.window.show_all()
        # Hook up the WANDevice / WANConnectionDevice services, if present.
        self.wan_device = None
        self.wan_connection_device = None
        try:
            self.wan_device = self.device.get_embedded_device_by_type('WANDevice')[0]
            print self.wan_device
            service = self.wan_device.get_service_by_type('WANCommonInterfaceConfig')
            service.subscribe_for_variable('PhysicalLinkStatus', callback=self.state_variable_change)
            # poll traffic counters every 10 seconds
            self.get_traffic_loop = task.LoopingCall(self.get_traffic,service)
            self.get_traffic_loop.start(10,now=True)
        except IndexError:
            # device exposes no WANDevice
            pass
        if self.wan_device != None:
            try:
                self.wan_connection_device = self.wan_device.get_embedded_device_by_type('WANConnectionDevice')[0]
                service = self.wan_connection_device.get_service_by_type(['WANIPConnection','WANPPPConnection'])
                service.subscribe_for_variable('PortMappingNumberOfEntries', callback=self.state_variable_change)
                service.subscribe_for_variable('ExternalIPAddress', callback=self.state_variable_change)
                # poll connection status (uptime) every 10 seconds
                self.get_state_loop = task.LoopingCall(self.get_state,service)
                self.get_state_loop.start(10,now=True)
            except IndexError:
                # no WANConnectionDevice below the WANDevice
                pass
    def button_action(self, widget, event):
        """Open a context menu on right-click over the port-mapping view."""
        x = int(event.x)
        y = int(event.y)
        path = self.nat_view.get_path_at_pos(x, y)
        if event.button == 3:
            menu = gtk.Menu()
            # add/modify are placeholders and stay disabled
            item = gtk.MenuItem("add new port-mapping...")
            item.set_sensitive(False)
            menu.append(item)
            if path != None:
                row_path,column,_,_ = path
                iter = self.nat_store.get_iter(row_path)
                selection = self.nat_view.get_selection()
                if not selection.path_is_selected(row_path):
                    self.nat_view.set_cursor(row_path,column,False)
                item = gtk.MenuItem("modify port-mapping...")
                item.set_sensitive(False)
                menu.append(item)
                item = gtk.MenuItem("delete port-mapping...")
                item.set_sensitive(True)
                # columns 2,3,4 = protocol, remote host, external port
                protocol,remote_host,external_port = self.nat_store.get(iter,2,3,4)
                item.connect("activate", self.delete_mapping,protocol,remote_host,external_port)
                menu.append(item)
            menu.show_all()
            menu.popup(None,None,None,event.button,event.time)
        return True
    def delete_mapping(self,widget,protocol,remote_host,external_port):
        """Invoke DeletePortMapping on the WANConnection service."""
        service = self.wan_connection_device.get_service_by_type(['WANIPConnection','WANPPPConnection'])
        action = service.get_action('DeletePortMapping')
        if action != None:
            d = action.call(NewRemoteHost=remote_host,NewExternalPort=external_port,NewProtocol=protocol)
            d.addCallback(self.handle_result)
            d.addErrback(self.handle_error)
    def hide(self,w,e):
        """delete_event handler: stop the polling loops and hide the window."""
        try:
            self.get_traffic_loop.stop()
        except:
            pass
        try:
            self.get_state_loop.stop()
        except:
            pass
        w.hide()
        # returning True stops GTK from destroying the window
        return True
    def state_variable_change(self,variable):
        """Dispatch on evented UPnP state-variable changes."""
        print "%s %r" % (variable.name, variable.value)
        if variable.name == "PhysicalLinkStatus":
            if variable.value.lower() == 'up':
                self.link_state_image.set_from_pixbuf(self.link_up_icon)
            else:
                self.link_state_image.set_from_pixbuf(self.link_down_icon)
            # refresh the link-type label with the common link properties
            def request_cb(r):
                #print r
                self.link_type.set_markup("<b>Type:</b> %s (%s/%s)" % (r['NewWANAccessType'],r['NewLayer1DownstreamMaxBitRate'],r['NewLayer1UpstreamMaxBitRate']))
            action = variable.service.get_action('GetCommonLinkProperties')
            d = action.call()
            d.addCallback(request_cb)
            d.addErrback(self.handle_error)
        elif variable.name == "PortMappingNumberOfEntries":
            # rebuild the whole table; fetch every entry by index
            self.nat_store.clear()
            if type(variable.value) == int and variable.value > 0:
                l = []
                for i in range(variable.value):
                    action = variable.service.get_action('GetGenericPortMappingEntry')
                    d = action.call(NewPortMappingIndex=i)
                    # remember the (1-based) index for sorting later
                    def add_index(r,index):
                        r['NewPortMappingIndex'] = index
                        return r
                    d.addCallback(add_index,i+1)
                    d.addErrback(self.handle_error)
                    l.append(d)
                def request_cb(r,last_updated_timestamp,v):
                    #print r
                    #print last_updated_timestamp == v.last_time_touched,last_updated_timestamp,v.last_time_touched
                    # only apply if the variable did not change meanwhile
                    if last_updated_timestamp == v.last_time_touched:
                        mappings = [m[1] for m in r if m[0] == True]
                        mappings.sort(cmp=lambda x,y : cmp(x['NewPortMappingIndex'],y['NewPortMappingIndex']))
                        for mapping in mappings:
                            #print mapping
                            self.nat_store.append([mapping['NewPortMappingIndex'],
                                                   mapping['NewEnabled'],
                                                   mapping['NewProtocol'],
                                                   mapping['NewRemoteHost'],
                                                   mapping['NewExternalPort'],
                                                   mapping['NewInternalClient'],
                                                   mapping['NewInternalPort'],
                                                   mapping['NewLeaseDuration'],
                                                   mapping['NewPortMappingDescription']
                                                   ])
                dl = defer.DeferredList(l)
                dl.addCallback(request_cb,variable.last_time_touched,variable)
                dl.addErrback(self.handle_error)
        elif variable.name == "ExternalIPAddress":
            self.external_ip.set_markup(variable.value)
    def get_traffic(self,service):
        """Polling callback: refresh the IN/OUT byte counters."""
        def request_cb(r,item,argument):
            item.set_markup(r[argument])
        action = service.get_action('GetTotalBytesReceived')
        if action != None:
            d = action.call()
            d.addCallback(request_cb,self.bytes_in,'NewTotalBytesReceived')
            d.addErrback(self.handle_error)
        action = service.get_action('GetTotalBytesSent')
        if action != None:
            d = action.call()
            d.addCallback(request_cb,self.bytes_out,'NewTotalBytesSent')
            d.addErrback(self.handle_error)
    def get_state(self,service):
        """Polling callback: refresh the connection uptime label."""
        def request_cb(r):
            #print r
            self.uptime.set_markup(r['NewUptime'])
        action = service.get_action('GetStatusInfo')
        if action != None:
            d = action.call()
            d.addCallback(request_cb)
            d.addErrback(self.handle_error)
    def handle_error(self,e):
        """Generic errback: report the failure and pass it on."""
        print 'we have an error', e
        return e
    def handle_result(self,r):
        """Generic callback: report the result and pass it on."""
        print "done", r
        return r
if __name__ == '__main__':
    # Standalone mode: there is no surrounding application, so closing the
    # window must stop the reactor instead of merely hiding the widget.
    # NOTE: the original patched the undefined name ``IGD`` (NameError);
    # the class defined above is ``IGDWidget``.
    IGDWidget.hide = lambda x,y,z: reactor.stop()
    i = IGDWidget(None,None)
    reactor.run()
|
|
#!/usr/bin/env python
# encoding: utf-8
# Default parameters work for formation_of_diffractons/
import numpy as np
import clawpack.petclaw as pyclaw
# Medium structure: module-level switch read by setaux() to select the
# material layout: 'piecewise-constant', 'sinusoidal' or 'smooth_checkerboard'.
medium_type='piecewise-constant'
def qinit(state,A,x0,y0,varx,vary):
    r"""Set initial conditions: Gaussian stress, zero velocities.

    The initial strain ``q[0]`` is obtained by inverting, cell by cell,
    the stress-strain relation selected by ``aux[2]`` for a Gaussian
    stress of amplitude ``A`` centered at ``(x0, y0)`` with variances
    ``(varx, vary)``.  Momenta ``q[1]`` and ``q[2]`` are set to zero.
    """
    x = state.grid.x.centers; y = state.grid.y.centers
    [yy,xx]=np.meshgrid(y,x)
    # Gaussian stress centered at (x0, y0).  BUGFIX: the y-term previously
    # used x0, so the y0 argument was silently ignored.
    stress = A*np.exp(-(xx-x0)**2/(2*varx))*np.exp(-(yy-y0)**2/(2*vary))
    # material parameters from aux
    stress_rel=state.aux[2,:]
    K=state.aux[1,:]
    # Invert sigma(eps) per relation type:
    #   1: sigma = K*eps             -> eps = sigma/K
    #   2: sigma = exp(K*eps) - 1    -> eps = log(sigma+1)/K
    #   3: sigma = K*eps + K^2*eps^2 -> eps = (sqrt(4*sigma+1)-1)/(2*K)
    state.q[0,:,:] = np.where(stress_rel==1,1,0) * stress/K \
        +np.where(stress_rel==2,1,0) * np.log(stress+1)/K \
        +np.where(stress_rel==3,1,0) * (np.sqrt(4*stress+1)-1)/(2*K)
    state.q[1,:,:]=0; state.q[2,:,:]=0
def setaux(x,y, KA, KB, rhoA, rhoB, stress_rel):
    r"""Return an array of material coefficients on the grid x (x) y.

    aux[0,i,j] = rho(x_i, y_j)  (material density)
    aux[1,i,j] = K(x_i, y_j)    (bulk modulus)
    aux[2,i,j] = stress-strain relation type at (x_i, y_j)
    aux[3,:,:] is allocated here but left uninitialized; it is filled
    with the current strain by b4step() on every step.

    The layout is chosen by the module-level ``medium_type`` switch:
    'piecewise-constant' alternates A/B layers in y with period 1;
    'sinusoidal' and 'smooth_checkerboard' modulate smoothly between the
    A and B values.
    """
    aux = np.empty((4,len(x),len(y)), order='F')
    if medium_type == 'piecewise-constant':
        # fractional part of the coordinates -> unit-cell position
        yfrac = y - np.floor(y)
        xfrac = x - np.floor(x)
        # create a meshgrid out of xfrac and yfrac
        [yf,xf] = np.meshgrid(yfrac,xfrac)
        # density: material A in the outer quarters of each unit cell in y,
        # material B in the middle half
        aux[0,:,:] = rhoA*(yf<=0.25) + rhoA*(yf>=0.75) + rhoB*(0.25<yf)*(yf<0.75)
        # bulk modulus, same layering
        aux[1,:,:] = KA * (yf<=0.25) + KA * (yf>=0.75) + KB * (0.25<yf)*(yf<0.75)
        # Stress-strain relation (uniform)
        aux[2,:,:] = stress_rel
    elif medium_type == 'sinusoidal' or medium_type == 'smooth_checkerboard':
        [yy,xx]=np.meshgrid(y,x)
        # amplitude/offset so fun in [-1,1] maps onto [min, max] of A/B values
        Amp_p=np.abs(rhoA-rhoB)/2; offset_p=(rhoA+rhoB)/2
        Amp_E=np.abs(KA-KB)/2; offset_E=(KA+KB)/2
        if medium_type == 'sinusoidal':
            frec_x=2*np.pi; frec_y=2*np.pi
            fun=np.sin(frec_x*xx)*np.sin(frec_y*yy)
        else:
            # smoothed checkerboard built from alternating tanh fronts
            # every 0.5 units in each direction (NB: xrange -> Python 2)
            sharpness = 10
            fun_x=xx*0; fun_y=yy*0
            for i in xrange(0,1+int(np.ceil((x[-1]-x[0])/(0.5)))):
                fun_x=fun_x+(-1)**i*np.tanh(sharpness*(xx-i*0.5))
            for i in xrange(0,1+int(np.ceil((y[-1]-y[0])/(0.5)))):
                fun_y=fun_y+(-1)**i*np.tanh(sharpness*(yy-i*0.5))
            fun=fun_x*fun_y
        aux[0,:,:]=Amp_p*fun+offset_p
        aux[1,:,:]=Amp_E*fun+offset_E
        aux[2,:,:]=stress_rel
    return aux
def b4step(solver,state):
    r"""This routine does three things:
    1. Put in aux[3,:,:] the value of q[0,:,:] (eps).
       This is required in rptpv.f.
       Only used by classic (not SharpClaw).
    2. Set the solution to zero in half of the domain at a specified time.
    3. Change the boundary conditions to periodic at a specified time.
    """
    # mirror the current strain into aux so the Riemann solver can read it
    state.aux[3,:,:] = state.q[0,:,:]
    # To set to 0 1st 1/2 of the domain. Used in rect domains with PBC in x
    if state.problem_data['turnZero_half_2D']==1:
        # only inside a window of one time unit after t_turnZero
        if state.t>=state.problem_data['t_turnZero'] and state.t<=state.problem_data['t_turnZero']+1:
            Y,X = np.meshgrid(state.grid.y.centers,state.grid.x.centers)
            # zero the solution where x <= 25
            state.q = state.q * (X>25)
    if state.problem_data['change_BCs']==1:
        if state.t>=state.problem_data['t_change_BCs']:
            # switch x boundaries (solution and aux) to periodic
            solver.bc_lower[0]=pyclaw.BC.periodic
            solver.bc_upper[0]=pyclaw.BC.periodic
            solver.aux_bc_lower[0]=pyclaw.BC.periodic
            solver.aux_bc_upper[0]=pyclaw.BC.periodic
def compute_stress(state):
    """ Compute stress from strain and store in state.p.

    Applies, cell by cell, the stress-strain relation selected by aux[2]:
      1: sigma = K*eps
      2: sigma = exp(K*eps) - 1
      3: sigma = K*eps + K**2*eps**2
    (These are the forward maps of the relations inverted in qinit.)
    """
    K=state.aux[1,:,:]
    stress_rel=state.aux[2,:,:]
    eps=state.q[0,:,:]
    # BUGFIX: the quadratic term must be inside the relation-3 selector;
    # previously K**2*eps**2 was added unconditionally to every cell due
    # to operator precedence (a*b+c parses as (a*b)+c).
    state.p[0,:,:] = np.where(stress_rel==1,1,0) * K*eps \
        +np.where(stress_rel==2,1,0) * (np.exp(eps*K)-1) \
        +np.where(stress_rel==3,1,0) * (K*eps+K**2*eps**2)
def total_energy(state):
    """Store the cell-wise total (kinetic + potential) energy in state.F.

    Kinetic energy is rho*(u**2 + v**2)/2 with the velocities recovered
    from the momenta q[1], q[2].  The potential energy assumes the
    exponential stress-strain relation sigma = exp(K*eps) - 1.  Each cell
    value is scaled by the cell area dx*dy.
    """
    density = state.aux[0,:,:]
    bulk = state.aux[1,:,:]
    # velocities from momenta
    vel_x = state.q[1,:,:] / density
    vel_y = state.q[2,:,:] / density
    kinetic = density * (vel_x**2 + vel_y**2) / 2.
    # potential energy density for the exponential constitutive law
    strain = state.q[0,:,:]
    sigma = np.exp(bulk * strain) - 1.
    potential = (sigma - np.log(sigma + 1.)) / bulk
    dx, dy = state.grid.delta[0], state.grid.delta[1]
    state.F[0,:,:] = (potential + kinetic) * dx * dy
def gauge_stress(q,aux):
    """Gauge recorder: return the stress at a gauge location.

    Assumes the exponential stress-strain relation, so the recorded
    value is sigma = exp(eps*K) - 1 with eps = q[0] and K = aux[1].
    """
    strain = q[0]
    bulk = aux[1]
    return [np.exp(strain*bulk) - 1]
def moving_wall_BC(state,dim,t,qbc,num_ghost):
    """Custom lower-x boundary condition: an oscillating wall.

    Strain (q[0]) and y-momentum (q[2]) are zero-order extrapolated into
    the ghost cells.  The x-momentum ghost values are reflected around a
    prescribed wall velocity that follows a single smooth (1+cos) pulse
    of amplitude a1/... centered at t=10 with half-width 10; outside that
    window the wall is at rest.
    """
    if dim.on_lower_boundary:
        # zero-order extrapolation of strain and y-momentum
        qbc[0,:num_ghost,:]=qbc[0,num_ghost,:]
        qbc[2,:num_ghost,:]=qbc[2,num_ghost,:]
        t0=(t-10)/10
        a1=0.2;
        if abs(t0)<=1.: vwall = -a1/2.*(1.+np.cos(t0*np.pi))
        else: vwall=0.
        # Reflect x-momentum around the wall velocity.
        # NOTE(review): this fills only num_ghost-1 ghost rows, leaving the
        # outermost ghost cell of q[1] untouched -- confirm whether the
        # range should be num_ghost (classic wall BCs fill all ghost cells).
        for ibc in xrange(num_ghost-1):
            qbc[1,num_ghost-ibc-1,:] = 2*vwall - qbc[1,num_ghost+ibc,:]
def setup(KA=17./2, KB=17./32, rhoA=1., rhoB=1., stress_rel=1,
          oscillating_wall=False, square_domain=True,
          initial_amplitude=1, varx=2., Nx=32, Ny=128, tfinal=100,
          outdir='./_output',solver_type='classic'):
    """
    Solve the p-system in 2D with variable coefficients.

    Parameters:
      KA, KB        -- bulk moduli of the two material layers
      rhoA, rhoB    -- densities of the two material layers
      stress_rel    -- 1: linear, 2: exponential, 3: quadratic relation
      oscillating_wall -- drive the lower-x boundary with moving_wall_BC
                       instead of an initial Gaussian pulse
      square_domain -- 100x100 domain with wall/extrap BCs when True,
                       otherwise a 200x1 strip periodic in y
      initial_amplitude -- amplitude of the initial Gaussian stress
      varx          -- variance (in x) of the initial Gaussian
      Nx, Ny        -- grid cells per unit length in x and y
      tfinal        -- final time (also used as the number of output frames)
      outdir        -- output directory
      solver_type   -- 'classic' or 'sharpclaw'

    Returns a configured (not yet run) pyclaw.Controller.
    """
    # Domain
    x_lower=0.; y_lower=0.;
    if square_domain:
        x_upper = 100.
        y_upper = 100.
        bc_x_lower=pyclaw.BC.wall; bc_x_upper=pyclaw.BC.extrap
        bc_y_lower=pyclaw.BC.wall; bc_y_upper=pyclaw.BC.extrap
    else:
        x_upper = 200.
        y_upper = 1.
        bc_x_lower=pyclaw.BC.wall; bc_x_upper=pyclaw.BC.extrap
        bc_y_lower=pyclaw.BC.periodic; bc_y_upper=pyclaw.BC.periodic
    # total number of cells = domain length times cells per unit length
    mx = (x_upper-x_lower)*Nx
    my = (y_upper-y_lower)*Ny
    # Initial condition parameters
    x0 = 0. # Center of initial perturbation
    y0 = 0. # Center of initial perturbation
    vary = 2.0 # Variance (in y) of initial Gaussian
    # Stress-strain relation:
    # 1: linear
    # 2: nonlinear (exponential)
    # 3: nonlinear (quadratic)
    # Optionally change x BCs to periodic at specified time:
    change_BCs = 0
    t_change_BCs = 0
    # Optionally zero out 1st half of the domain at specified time:
    turnZero_half_2D = 0
    t_turnZero = 50
    num_output_times = tfinal
    if solver_type=='classic':
        solver = pyclaw.ClawSolver2D()
        solver.limiters = pyclaw.limiters.tvd.MC
        solver.cfl_max = 0.45
        solver.cfl_desired = 0.4
        solver.dimensional_split=False
    elif solver_type=='sharpclaw':
        solver = pyclaw.SharpClawSolver2D()
        solver.cfl_max = 2.5
        solver.cfl_desired = 2.45
    # stress-strain relation:
    # = 1 for linear
    # = 2 for exponential
    # = 3 for quadratic
    if stress_rel < 3:
        from clawpack import riemann
        solver.rp = riemann.psystem_2D
    elif stress_rel==3:
        # quadratic relation uses a custom local Riemann solver module
        import psystem_quadratic_2D
        solver.rp = psystem_quadratic_2D
    solver.num_eqn = 3
    solver.num_waves = 2
    solver.bc_lower = [bc_x_lower, bc_y_lower]
    solver.bc_upper = [bc_x_upper, bc_y_upper]
    solver.aux_bc_lower = [bc_x_lower, bc_y_lower]
    solver.aux_bc_upper = [bc_x_upper, bc_y_upper]
    if oscillating_wall:
        # This code assumes we'd never use an initial condition
        # and this boundary condition together.
        initial_amplitude = 0
        solver.user_bc_lower = moving_wall_BC
        solver.aux_bc_lower[0] = pyclaw.BC.extrap
        solver.bc_lower[0] = pyclaw.BC.custom
    solver.fwave = True
    # b4step keeps aux[3] in sync with the strain and handles the optional
    # mid-run BC change / solution zeroing
    solver.before_step = b4step
    claw = pyclaw.Controller()
    claw.tfinal = tfinal
    claw.solver = solver
    claw.outdir = outdir
    claw.num_output_times = num_output_times
    # Domain
    x = pyclaw.Dimension('x',x_lower,x_upper,mx)
    y = pyclaw.Dimension('y',y_lower,y_upper,my)
    domain = pyclaw.Domain( [x,y] )
    num_aux = 4
    state = pyclaw.State(domain,solver.num_eqn,num_aux)
    #Set global parameters
    state.problem_data = {}
    state.problem_data['turnZero_half_2D'] = turnZero_half_2D
    state.problem_data['t_turnZero'] = t_turnZero
    state.problem_data['change_BCs'] = change_BCs
    state.problem_data['t_change_BCs'] = t_change_BCs
    grid = state.grid
    state.aux = setaux(grid.x.centers,grid.y.centers, KA, KB, rhoA, rhoB, stress_rel)
    qinit(state,initial_amplitude ,x0,y0,varx,vary)
    claw.solution = pyclaw.Solution(state,domain)
    claw.num_output_times = num_output_times
    # derived quantities: p holds the stress, F the total energy
    state.mp = 1
    state.mF = 1
    claw.compute_p = compute_stress
    claw.compute_F = total_energy
    claw.solution.state.grid.add_gauges([[25.0,0.75],[50.0,0.75],[75.0,0.75],[25.0,1.25],[50.0,1.25],[75.0,1.25]])
    solver.compute_gauge_values = gauge_stress
    # Do we need this?
    claw.solution.state.keep_gauges = True
    # This saves time on Shaheen, but otherwise one may wish to turn it on:
    claw.write_aux_init = False
    #Solve
    return claw
#--------------------------
def setplot(plotdata):
#--------------------------
    """
    Specify what is to be plotted at each frame.
    Input: plotdata, an instance of visclaw.data.ClawPlotData.
    Output: a modified version of plotdata.
    """
    from clawpack.visclaw import colormaps
    import matplotlib
    plotdata.clearfigures() # clear any old figures,axes,items data
    # Figure for stress (pcolor of K*eps, see stress() below)
    plotfigure = plotdata.new_plotfigure(name='Stress', figno=0)
    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes()
    plotaxes.title = 'Stress'
    plotaxes.scaled = 'tight'
    # Set up for item on these axes:
    plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
    # plot_var is a callback evaluated on each frame's data
    plotitem.plot_var = stress
    plotitem.pcolor_cmap = matplotlib.cm.RdBu
    plotitem.pcolor_cmin = -0.05
    plotitem.pcolor_cmax = 0.05
    plotitem.add_colorbar = True
    return plotdata
def stress(current_data):
    """Plot callback: return the linear stress K*eps for the current frame.

    Rebuilds the material coefficients on this frame's grid via setaux()
    (with this plot's fixed material parameters) and multiplies the bulk
    modulus by the strain field q[0].
    """
    material = setaux(current_data.x[:,0],current_data.y[0,:],KA=5./8, KB=5./2, rhoA=8./5, rhoB=2./5, stress_rel=1)
    frame_q = current_data.q
    bulk_modulus = material[1,...]
    return bulk_modulus*frame_q[0,...]
if __name__=="__main__":
from clawpack.pyclaw.util import run_app_from_main
output = run_app_from_main(setup)
|
|
"""EasyClangComplete plugin for Sublime Text 3.
Provides completion suggestions for C/C++ languages based on clang
Attributes:
log (logging.Logger): logger for this module
"""
import sublime
import sublime_plugin
import logging
import shutil
from os import path
from .plugin import tools
from .plugin import view_config
from .plugin import flags_sources
from .plugin.utils import thread_pool
from .plugin.utils import progress_status
from .plugin.settings import settings_manager
from .plugin.settings import settings_storage
# Aliases into the plugin's submodules so the rest of this file can use
# short names.
SettingsManager = settings_manager.SettingsManager
SettingsStorage = settings_storage.SettingsStorage
ViewConfigManager = view_config.ViewConfigManager
SublBridge = tools.SublBridge
Tools = tools.Tools
MoonProgressStatus = progress_status.MoonProgressStatus
ColorSublimeProgressStatus = progress_status.ColorSublimeProgressStatus
NoneSublimeProgressStatus = progress_status.NoneSublimeProgressStatus
PosStatus = tools.PosStatus
CMakeFile = flags_sources.cmake_file.CMakeFile
CMakeFileCache = flags_sources.cmake_file.CMakeFileCache
ThreadPool = thread_pool.ThreadPool
ThreadJob = thread_pool.ThreadJob
# Plugin-wide logger.  Format and verbosity are switched at runtime in
# EasyClangComplete.on_settings_changed().
log = logging.getLogger("ECC")
log.setLevel(logging.DEBUG)
log.propagate = False
formatter_default = logging.Formatter(
    '[%(name)s:%(levelname)-7s]: %(message)s')
formatter_verbose = logging.Formatter(
    '[%(name)s:%(levelname)s]:[%(filename)s]:[%(funcName)s]:'
    '[%(threadName)s]: %(message)s')
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(formatter_default)
# guard against attaching duplicate handlers on module reload
if not log.hasHandlers():
    log.addHandler(ch)
# Filled in by EasyClangComplete.__init__ and invoked by the module-level
# plugin_loaded()/plugin_unloaded() hooks below.
handle_plugin_loaded_function = None
handle_plugin_unloaded_function = None
def plugin_loaded():
    """Called right after sublime api is ready to use.
    We need it to initialize all the different classes that encapsulate
    functionality. We can only properly init them after sublime api is
    available."""
    # re-import the plugin's submodules first so a package update takes
    # effect without restarting Sublime Text
    tools.Reloader.reload_all()
    # delegate to EasyClangComplete.on_plugin_loaded (set in its __init__)
    handle_plugin_loaded_function()
def plugin_unloaded():
    """Called right before the package was unloaded."""
    # delegate to EasyClangComplete.on_plugin_unloaded (set in its __init__)
    handle_plugin_unloaded_function()
class CleanCmakeCommand(sublime_plugin.TextCommand):
    """Command that cleans cmake build directory."""
    def run(self, edit):
        """Run clean command.
        Detects if there is a CMakeLists.txt associated to current view and
        cleans all related information in case there is one.
        """
        if not Tools.is_valid_view(self.view):
            return
        import gc
        file_path = self.view.file_name()
        cmake_cache = CMakeFileCache()
        try:
            # look up the CMakeLists.txt associated with this file; raises
            # KeyError when the view has no cached cmake project
            cmake_file_path = cmake_cache[file_path]
            log.debug("Cleaning file: '%s'", cmake_file_path)
            # drop both directions of the cached mapping
            del cmake_cache[file_path]
            del cmake_cache[cmake_file_path]
            # Better safe than sorry. Cleanup!
            gc.collect()
            temp_proj_dir = CMakeFile.unique_folder_name(cmake_file_path)
            if path.exists(temp_proj_dir):
                log.debug("Cleaning build directory: '%s'", temp_proj_dir)
                shutil.rmtree(temp_proj_dir, ignore_errors=True)
        except KeyError:
            log.debug("Nothing to clean")
class EasyClangComplete(sublime_plugin.EventListener):
    """Base class for this plugin.
    Most of the functionality is delegated.
    """
    # shared worker pool for all asynchronous jobs of this plugin
    thread_pool = ThreadPool(max_workers=4)
    # tags identifying the kinds of jobs submitted to the pool
    CLEAR_JOB_TAG = "clear"
    COMPLETE_JOB_TAG = "complete"
    UPDATE_JOB_TAG = "update"
    INFO_JOB_TAG = "info"
    def __init__(self):
        """Initialize the object."""
        super().__init__()
        global handle_plugin_loaded_function
        global handle_plugin_unloaded_function
        # expose the load/unload handlers to the module-level hooks
        handle_plugin_loaded_function = self.on_plugin_loaded
        handle_plugin_unloaded_function = self.on_plugin_unloaded
        # init instance variables to reasonable defaults
        self.current_completions = None
        self.current_job_id = None
        self.settings_manager = None
        self.view_config_manager = None
        self.loaded = False
    def on_plugin_unloaded(self):
        """Manage what we do when the plugin is unloaded."""
        log.debug("plugin unloaded")
        self.loaded = False
    def on_plugin_loaded(self):
        """Called upon plugin load event."""
        # init settings manager
        self.loaded = True
        log.debug("handle plugin loaded")
        self.settings_manager = SettingsManager()
        self.settings_manager.add_change_listener(self.on_settings_changed)
        self.on_settings_changed()
        # init view config manager
        self.view_config_manager = ViewConfigManager()
        # As the plugin has just loaded, we might have missed an activation
        # event for the active view so completion will not work for it until
        # re-activated. Force active view initialization in that case.
        self.on_activated_async(sublime.active_window().active_view())
    def on_settings_changed(self):
        """Called when any of the settings changes."""
        log.debug("on settings changed handle")
        if not self.loaded:
            log.warning(
                " cannot process settings change as plugin is not loaded")
            return
        if not self.settings_manager:
            self.settings_manager = SettingsManager()
        user_settings = self.settings_manager.user_settings()
        # switch logging format/verbosity according to the user setting
        if user_settings.verbose:
            ch.setFormatter(formatter_verbose)
            ch.setLevel(logging.DEBUG)
        else:
            ch.setFormatter(formatter_default)
            ch.setLevel(logging.INFO)
        if user_settings.need_reparse():
            # stop processing this if the settings are still invalid
            return
        # set progress status
        progress_style_tag = user_settings.progress_style
        if progress_style_tag == SettingsStorage.MOON_STYLE_TAG:
            progress_style = MoonProgressStatus()
        elif progress_style_tag == SettingsStorage.COLOR_SUBLIME_STYLE_TAG:
            progress_style = ColorSublimeProgressStatus()
        else:
            progress_style = NoneSublimeProgressStatus()
        EasyClangComplete.thread_pool.progress_status = progress_style
    def on_activated_async(self, view):
        """Called upon activating a view. Execution in a worker thread.
        Args:
            view (sublime.View): current view
        """
        # disable on_activated_async when running tests
        if view.settings().get("disable_easy_clang_complete"):
            return
        if not Tools.is_valid_view(view):
            try:
                EasyClangComplete.thread_pool.progress_status.erase_status()
            except AttributeError as e:
                # progress_status may not be set yet before settings load
                log.debug("cannot clear status, %s", e)
            return
        EasyClangComplete.thread_pool.progress_status.showing = True
        log.debug("on_activated_async view id %s", view.buffer_id())
        settings = self.settings_manager.settings_for_view(view)
        # All is taken care of. The view is built if needed.
        job = ThreadJob(name=EasyClangComplete.UPDATE_JOB_TAG,
                        callback=EasyClangComplete.config_updated,
                        function=self.view_config_manager.load_for_view,
                        args=[view, settings])
        EasyClangComplete.thread_pool.new_job(job)
    def on_selection_modified(self, view):
        """Called when selection is modified. Executed in gui thread.
        Args:
            view (sublime.View): current view
        """
        settings = self.settings_manager.settings_for_view(view)
        # phantom-style errors do not use popups, so nothing to do here
        if settings.errors_style == SettingsStorage.PHANTOMS_STYLE:
            return
        if Tools.is_valid_view(view):
            (row, _) = SublBridge.cursor_pos(view)
            view_config = self.view_config_manager.get_from_cache(view)
            if not view_config:
                return
            if not view_config.completer:
                return
            view_config.completer.error_vis.show_popup_if_needed(view, row)
    def on_modified_async(self, view):
        """Called in a worker thread when view is modified.
        Args:
            view (sublime.View): current view
        """
        if Tools.is_valid_view(view):
            log.debug("on_modified_async view id %s", view.buffer_id())
            view_config = self.view_config_manager.get_from_cache(view)
            if not view_config:
                return
            if not view_config.completer:
                return
            # stale error highlights are cleared on every edit
            view_config.completer.error_vis.clear(view)
    def on_post_save_async(self, view):
        """Executed in a worker thread on save.
        Args:
            view (sublime.View): current view
        """
        # disable on_activated_async when running tests
        if view.settings().get("disable_easy_clang_complete"):
            return
        if view.file_name().endswith('.sublime-project'):
            if not self.settings_manager:
                # NOTE(review): log message has a typo ("no cannot")
                log.error("no settings manager, no cannot reload settings")
                return
            log.debug("Project file changed. Reloading settings.")
            self.settings_manager.on_settings_changed()
        if Tools.is_valid_view(view):
            log.debug("saving view: %s", view.buffer_id())
            settings = self.settings_manager.settings_for_view(view)
            # rebuild the view config since the file contents changed on disk
            job = ThreadJob(name=EasyClangComplete.UPDATE_JOB_TAG,
                            callback=EasyClangComplete.config_updated,
                            function=self.view_config_manager.load_for_view,
                            args=[view, settings])
            EasyClangComplete.thread_pool.new_job(job)
            # invalidate current completions
            self.current_completions = None
    def on_close(self, view):
        """Called on closing the view.
        Args:
            view (sublime.View): current view
        """
        if Tools.is_valid_view(view):
            log.debug("closing view %s", view.buffer_id())
            self.settings_manager.clear_for_view(view)
            file_id = view.buffer_id()
            job = ThreadJob(name=EasyClangComplete.CLEAR_JOB_TAG,
                            callback=EasyClangComplete.config_removed,
                            function=self.view_config_manager.clear_for_view,
                            args=[file_id])
            EasyClangComplete.thread_pool.new_job(job)
    @staticmethod
    def config_removed(future):
        """Callback called when config has been removed for a view.
        The corresponding view id is saved in future.result()
        Args:
            future (concurrent.Future): future holding id of removed view
        """
        if future.done():
            log.debug("removed config for id: %s", future.result())
        elif future.cancelled():
            log.debug("could not remove config -> cancelled")
    @staticmethod
    def config_updated(future):
        """Callback called when config has been updated for a view.
        Args:
            future (concurrent.Future): future holding config of updated view
        """
        if future.done():
            log.debug("updated config: %s", future.result())
        elif future.cancelled():
            log.debug("could not update config -> cancelled")
    @staticmethod
    def on_open_declaration(location):
        """Callback called when link to type is clicked in info popup.
        Opens location with type declaration
        """
        sublime.active_window().open_file(location, sublime.ENCODED_POSITION)
    def info_finished(self, future):
        """Callback called when additional information for tag is available.
        Creates popup containing information about text under the cursor
        """
        if not future.done():
            return
        (tooltip_request, result) = future.result()
        if result == "":
            return
        if not tooltip_request:
            return
        # ignore results from a stale request
        if tooltip_request.get_identifier() != self.current_job_id:
            return
        view = tooltip_request.get_view()
        view.show_popup(result,
                        location=tooltip_request.get_trigger_position(),
                        flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY,
                        max_width=1000,
                        on_navigate=self.on_open_declaration)
    def completion_finished(self, future):
        """Callback called when completion async function has returned.
        Checks if job id equals the one that is expected now and updates the
        completion list that is going to be used in on_query_completions
        Args:
            future (concurrent.Future): future holding completion result
        """
        if not future.done():
            return
        (completion_request, completions) = future.result()
        if not completion_request:
            return
        # ignore results from a stale request
        if completion_request.get_identifier() != self.current_job_id:
            return
        active_view = sublime.active_window().active_view()
        if completion_request.is_suitable_for_view(active_view):
            self.current_completions = completions
        else:
            log.debug("ignoring completions")
            self.current_completions = []
        if self.current_completions:
            # we only want to trigger the autocompletion popup if there
            # are new completions to show there. Otherwise let it be.
            SublBridge.show_auto_complete(active_view)
    def on_hover(self, view, point, hover_zone):
        """Function that is called when mouse pointer hovers over text.
        Triggers showing popup with additional information about element under
        cursor.
        """
        if not Tools.is_valid_view(view):
            return
        settings = self.settings_manager.settings_for_view(view)
        if not settings.show_type_info:
            return
        if hover_zone != sublime.HOVER_TEXT:
            return
        tooltip_request = tools.ActionRequest(view, point)
        # remember the id so late results for older hovers are discarded
        self.current_job_id = tooltip_request.get_identifier()
        job = ThreadJob(name=EasyClangComplete.INFO_JOB_TAG,
                        callback=self.info_finished,
                        function=self.view_config_manager.trigger_info,
                        args=[view, tooltip_request])
        EasyClangComplete.thread_pool.new_job(job)
    def on_query_completions(self, view, prefix, locations):
        """Function that is called when user queries completions in the code.
        Args:
            view (sublime.View): current view
            prefix (TYPE): Description
            locations (list[int]): positions of the cursor (first if many).
        Returns:
            sublime.Completions: completions with a flag
        """
        if not Tools.is_valid_view(view):
            log.debug("not a valid view")
            return Tools.SHOW_DEFAULT_COMPLETIONS
        log.debug("on_query_completions view id %s", view.buffer_id())
        log.debug("prefix: %s, locations: %s" % (prefix, locations))
        trigger_pos = locations[0] - len(prefix)
        completion_request = tools.ActionRequest(view, trigger_pos)
        current_pos_id = completion_request.get_identifier()
        log.debug("this position has identifier: '%s'", current_pos_id)
        # get settings for this view
        settings = self.settings_manager.settings_for_view(view)
        # reuse cached completions when the async job for this exact
        # position already finished
        if self.current_completions and current_pos_id == self.current_job_id:
            log.debug("returning existing completions")
            return SublBridge.format_completions(
                self.current_completions,
                settings.hide_default_completions)
        # Verify that character under the cursor is one allowed trigger
        pos_status = Tools.get_pos_status(trigger_pos, view, settings)
        if pos_status == PosStatus.WRONG_TRIGGER:
            # we are at a wrong trigger, remove all completions from the list
            log.debug("wrong trigger")
            log.debug("hiding default completions")
            return Tools.HIDE_DEFAULT_COMPLETIONS
        if pos_status == PosStatus.COMPLETION_NOT_NEEDED:
            log.debug("completion not needed")
            # show default completions for now if allowed
            if settings.hide_default_completions:
                log.debug("hiding default completions")
                return Tools.HIDE_DEFAULT_COMPLETIONS
            log.debug("showing default completions")
            return Tools.SHOW_DEFAULT_COMPLETIONS
        self.current_job_id = current_pos_id
        log.debug("starting async auto_complete with id: %s",
                  self.current_job_id)
        # submit async completion job
        job = ThreadJob(name=EasyClangComplete.COMPLETE_JOB_TAG,
                        callback=self.completion_finished,
                        function=self.view_config_manager.trigger_completion,
                        args=[view, completion_request])
        EasyClangComplete.thread_pool.new_job(job)
        # show default completions for now if allowed
        if settings.hide_default_completions:
            log.debug("hiding default completions")
            return Tools.HIDE_DEFAULT_COMPLETIONS
        log.debug("showing default completions")
        return Tools.SHOW_DEFAULT_COMPLETIONS
|
|
# -*- coding: utf-8 -*-
import mock
import unittest
from nose.tools import * # noqa
from github3 import GitHubError
from github3.repos import Repository
from tests.base import OsfTestCase
from tests.factories import UserFactory, ProjectFactory
from framework.auth import Auth
from website.addons.github.exceptions import NotFoundError
from website.addons.github import settings as github_settings
from website.addons.github.tests.factories import GitHubOauthSettingsFactory
from website.addons.github.model import AddonGitHubUserSettings
from website.addons.github.model import AddonGitHubNodeSettings
from website.addons.github.model import AddonGitHubOauthSettings
from .utils import create_mock_github
mock_github = create_mock_github()
class TestCallbacks(OsfTestCase):
    """Tests for the ``before_*``/``after_*`` callback hooks on the GitHub
    node settings.

    The fixture is a project with two contributors: the creator, whose
    user settings back the addon authentication, and one contributor who
    did not authenticate the addon.
    """

    def setUp(self):
        super(TestCallbacks, self).setUp()
        self.project = ProjectFactory.build()
        self.consolidated_auth = Auth(self.project.creator)
        self.non_authenticator = UserFactory()
        self.project.save()
        self.project.add_contributor(
            contributor=self.non_authenticator,
            auth=self.consolidated_auth,
        )
        # Enable the addon on both the project and the creator, then wire
        # the node settings to the creator's user settings and a fixed repo.
        self.project.add_addon('github', auth=self.consolidated_auth)
        self.project.creator.add_addon('github')
        self.node_settings = self.project.get_addon('github')
        self.user_settings = self.project.creator.get_addon('github')
        self.node_settings.user_settings = self.user_settings
        self.node_settings.user = 'Queen'
        self.node_settings.repo = 'Sheer-Heart-Attack'
        self.node_settings.save()

    @mock.patch('website.addons.github.api.GitHub.repo')
    def test_before_make_public(self, mock_repo):
        # A repo that can no longer be fetched must not block making the
        # project public (no warning message is produced).
        mock_repo.side_effect = NotFoundError
        result = self.node_settings.before_make_public(self.project)
        assert_is(result, None)

    @mock.patch('website.addons.github.api.GitHub.repo')
    def test_before_page_load_osf_public_gh_public(self, mock_repo):
        # Public project + public repo: privacy matches, no warning.
        self.project.is_public = True
        self.project.save()
        mock_repo.return_value = Repository.from_json({'private': False})
        message = self.node_settings.before_page_load(self.project, self.project.creator)
        mock_repo.assert_called_with(
            self.node_settings.user,
            self.node_settings.repo,
        )
        assert_false(message)

    @mock.patch('website.addons.github.api.GitHub.repo')
    def test_before_page_load_osf_public_gh_private(self, mock_repo):
        # Public project + private repo: mismatch, warn the user.
        self.project.is_public = True
        self.project.save()
        mock_repo.return_value = Repository.from_json({'private': True})
        message = self.node_settings.before_page_load(self.project, self.project.creator)
        mock_repo.assert_called_with(
            self.node_settings.user,
            self.node_settings.repo,
        )
        assert_true(message)

    @mock.patch('website.addons.github.api.GitHub.repo')
    def test_before_page_load_osf_private_gh_public(self, mock_repo):
        # Private project + public repo: mismatch, warn the user.
        mock_repo.return_value = Repository.from_json({'private': False})
        message = self.node_settings.before_page_load(self.project, self.project.creator)
        mock_repo.assert_called_with(
            self.node_settings.user,
            self.node_settings.repo,
        )
        assert_true(message)

    @mock.patch('website.addons.github.api.GitHub.repo')
    def test_before_page_load_osf_private_gh_private(self, mock_repo):
        # Private project + private repo: privacy matches, no warning.
        mock_repo.return_value = Repository.from_json({'private': True})
        message = self.node_settings.before_page_load(self.project, self.project.creator)
        mock_repo.assert_called_with(
            self.node_settings.user,
            self.node_settings.repo,
        )
        assert_false(message)

    def test_before_page_load_not_contributor(self):
        # Non-contributors never see privacy-mismatch warnings.
        message = self.node_settings.before_page_load(self.project, UserFactory())
        assert_false(message)

    def test_before_page_load_not_logged_in(self):
        # Anonymous visitors never see privacy-mismatch warnings.
        message = self.node_settings.before_page_load(self.project, None)
        assert_false(message)

    def test_before_remove_contributor_authenticator(self):
        # Removing the authenticating contributor should warn.
        message = self.node_settings.before_remove_contributor(
            self.project, self.project.creator
        )
        assert_true(message)

    def test_before_remove_contributor_not_authenticator(self):
        # Removing a non-authenticating contributor needs no warning.
        message = self.node_settings.before_remove_contributor(
            self.project, self.non_authenticator
        )
        assert_false(message)

    def test_after_remove_contributor_authenticator_self(self):
        # Authenticator removes themselves: auth is detached; the message
        # must not invite re-authentication (they chose to leave).
        message = self.node_settings.after_remove_contributor(
            self.project, self.project.creator, self.consolidated_auth
        )
        assert_equal(
            self.node_settings.user_settings,
            None
        )
        assert_true(message)
        assert_not_in("You can re-authenticate", message)

    def test_after_remove_contributor_authenticator_not_self(self):
        # Someone else removes the authenticator: auth is detached and the
        # message invites the remover to re-authenticate.
        auth = Auth(user=self.non_authenticator)
        message = self.node_settings.after_remove_contributor(
            self.project, self.project.creator, auth
        )
        assert_equal(
            self.node_settings.user_settings,
            None
        )
        assert_true(message)
        assert_in("You can re-authenticate", message)

    def test_after_remove_contributor_not_authenticator(self):
        # Removing a non-authenticating contributor leaves auth intact.
        self.node_settings.after_remove_contributor(
            self.project, self.non_authenticator, self.consolidated_auth
        )
        assert_not_equal(
            self.node_settings.user_settings,
            None,
        )

    @unittest.skipIf(not github_settings.SET_PRIVACY, 'Setting privacy is disabled.')
    @mock.patch('website.addons.github.api.GitHub.set_privacy')
    def test_after_set_privacy_private_authenticated(self, mock_set_privacy):
        # Successful privacy change is mirrored to GitHub (private=True).
        mock_set_privacy.return_value = {}
        message = self.node_settings.after_set_privacy(
            self.project, 'private',
        )
        mock_set_privacy.assert_called_with(
            self.node_settings.user,
            self.node_settings.repo,
            True,
        )
        assert_true(message)
        assert_in('made private', message.lower())

    @unittest.skipIf(not github_settings.SET_PRIVACY, 'Setting privacy is disabled.')
    @mock.patch('website.addons.github.api.GitHub.set_privacy')
    def test_after_set_privacy_public_authenticated(self, mock_set_privacy):
        # Successful privacy change is mirrored to GitHub (private=False).
        mock_set_privacy.return_value = {}
        message = self.node_settings.after_set_privacy(
            self.project, 'public'
        )
        mock_set_privacy.assert_called_with(
            self.node_settings.user,
            self.node_settings.repo,
            False,
        )
        assert_true(message)
        assert_in('made public', message.lower())

    @unittest.skipIf(not github_settings.SET_PRIVACY, 'Setting privacy is disabled.')
    @mock.patch('website.addons.github.api.GitHub.repo')
    @mock.patch('website.addons.github.api.GitHub.set_privacy')
    def test_after_set_privacy_not_authenticated(self, mock_set_privacy, mock_repo):
        # When GitHub rejects the change, the repo is re-fetched and the
        # user is told the privacy setting could not be applied.
        mock_set_privacy.return_value = {'errors': ['it broke']}
        mock_repo.return_value = {'private': True}
        message = self.node_settings.after_set_privacy(
            self.project, 'private',
        )
        mock_set_privacy.assert_called_with(
            self.node_settings.user,
            self.node_settings.repo,
            True,
        )
        mock_repo.assert_called_with(
            self.node_settings.user,
            self.node_settings.repo,
        )
        assert_true(message)
        assert_in('could not set privacy', message.lower())

    def test_after_fork_authenticator(self):
        # Forking as the authenticator carries the auth to the clone.
        fork = ProjectFactory()
        clone, message = self.node_settings.after_fork(
            self.project, fork, self.project.creator,
        )
        assert_equal(
            self.node_settings.user_settings,
            clone.user_settings,
        )

    def test_after_fork_not_authenticator(self):
        # Forking as anyone else leaves the clone unauthenticated.
        fork = ProjectFactory()
        clone, message = self.node_settings.after_fork(
            self.project, fork, self.non_authenticator,
        )
        assert_equal(
            clone.user_settings,
            None,
        )

    def test_after_delete(self):
        self.project.remove_node(Auth(user=self.project.creator))
        # Ensure that changes to node settings have been saved
        self.node_settings.reload()
        assert_true(self.node_settings.user_settings is None)

    @mock.patch('website.archiver.tasks.archive')
    def test_does_not_get_copied_to_registrations(self, mock_archive):
        # Registrations must not inherit the GitHub addon.
        registration = self.project.register_node(
            schema=None,
            auth=Auth(user=self.project.creator),
            template='Template1',
            data='hodor'
        )
        assert_false(registration.has_addon('github'))
class TestAddonGithubUserSettings(OsfTestCase):
    """Tests for ``AddonGitHubUserSettings``, mainly its proxying of
    fields from the attached ``AddonGitHubOauthSettings`` record.
    """

    def setUp(self):
        OsfTestCase.setUp(self)
        # A user-settings record linked to a saved oauth-settings record.
        self.user_settings = AddonGitHubUserSettings()
        self.oauth_settings = AddonGitHubOauthSettings()
        self.oauth_settings.github_user_id = 'testuser'
        self.oauth_settings.save()
        self.user_settings.oauth_settings = self.oauth_settings
        self.user_settings.save()

    def test_repr(self):
        self.user_settings.owner = UserFactory()
        assert_in(self.user_settings.owner._id, repr(self.user_settings))
        # NOTE: a stray `GitHubOauthSettingsFactory()` call previously
        # followed the assertion; its result was never used and it did not
        # affect the test, so it has been removed.

    def test_public_id_is_none_if_no_oauth_settings_attached(self):
        self.user_settings.oauth_settings = None
        self.user_settings.save()
        # Regression test for:
        # https://github.com/CenterForOpenScience/openscienceframework.org/issues/1053
        assert_is_none(self.user_settings.public_id)

    def test_github_user_name(self):
        # github_user_name is read through from the oauth settings.
        self.oauth_settings.github_user_name = "test user name"
        self.oauth_settings.save()
        assert_equal(self.user_settings.github_user_name, "test user name")

    def test_oauth_access_token(self):
        # oauth_access_token is read through from the oauth settings.
        self.oauth_settings.oauth_access_token = "test access token"
        self.oauth_settings.save()
        assert_equal(self.user_settings.oauth_access_token, "test access token")

    def test_oauth_token_type(self):
        # oauth_token_type is read through from the oauth settings.
        self.oauth_settings.oauth_token_type = "test token type"
        self.oauth_settings.save()
        assert_equal(self.user_settings.oauth_token_type, "test token type")

    @mock.patch('website.addons.github.api.GitHub.revoke_token')
    def test_clear_auth(self, mock_revoke_token):
        # clear_auth must revoke the token and blank every credential field.
        mock_revoke_token.return_value = True
        self.user_settings.clear_auth(save=True)
        assert_false(self.user_settings.github_user_name)
        assert_false(self.user_settings.oauth_token_type)
        assert_false(self.user_settings.oauth_access_token)
        assert_false(self.user_settings.oauth_settings)
class TestAddonGithubNodeSettings(OsfTestCase):
    """Tests for ``AddonGitHubNodeSettings``: completeness flags, webhook
    deletion, and JSON serialization.
    """

    def setUp(self):
        OsfTestCase.setUp(self)
        # An authenticated user whose settings back the node addon.
        self.user = UserFactory()
        self.user.add_addon('github')
        self.user_settings = self.user.get_addon('github')
        self.oauth_settings = AddonGitHubOauthSettings(oauth_access_token='foobar')
        self.oauth_settings.github_user_id = 'testuser'
        self.oauth_settings.save()
        self.user_settings.oauth_settings = self.oauth_settings
        self.user_settings.save()
        self.node_settings = AddonGitHubNodeSettings(
            owner=ProjectFactory(),
            user='chrisseto',
            repo='openpokemon',
            user_settings=self.user_settings,
        )
        self.node_settings.save()

    def test_complete_true(self):
        # Auth + user + repo present -> complete.
        assert_true(self.node_settings.has_auth)
        assert_true(self.node_settings.complete)

    def test_complete_false(self):
        # Missing GitHub user -> authed but not complete.
        self.node_settings.user = None
        assert_true(self.node_settings.has_auth)
        assert_false(self.node_settings.complete)

    def test_complete_repo_false(self):
        # Missing repo -> authed but not complete.
        self.node_settings.repo = None
        assert_true(self.node_settings.has_auth)
        assert_false(self.node_settings.complete)

    def test_complete_auth_false(self):
        # No user settings -> neither authed nor complete.
        self.node_settings.user_settings = None
        assert_false(self.node_settings.has_auth)
        assert_false(self.node_settings.complete)

    @mock.patch('website.addons.github.api.GitHub.delete_hook')
    def test_delete_hook(self, mock_delete_hook):
        self.node_settings.hook_id = 'hook'
        self.node_settings.save()
        args = (
            self.node_settings.user,
            self.node_settings.repo,
            self.node_settings.hook_id,
        )
        res = self.node_settings.delete_hook()
        assert_true(res)
        mock_delete_hook.assert_called_with(*args)

    @mock.patch('website.addons.github.api.GitHub.delete_hook')
    def test_delete_hook_no_hook(self, mock_delete_hook):
        # Without a stored hook_id, delete_hook is a no-op.
        res = self.node_settings.delete_hook()
        assert_false(res)
        assert_false(mock_delete_hook.called)

    @mock.patch('website.addons.github.api.GitHub.delete_hook')
    def test_delete_hook_not_found(self, mock_delete_hook):
        # A missing hook on GitHub's side is reported as failure, not raised.
        self.node_settings.hook_id = 'hook'
        self.node_settings.save()
        mock_delete_hook.side_effect = NotFoundError
        args = (
            self.node_settings.user,
            self.node_settings.repo,
            self.node_settings.hook_id,
        )
        res = self.node_settings.delete_hook()
        assert_false(res)
        mock_delete_hook.assert_called_with(*args)

    @mock.patch('website.addons.github.api.GitHub.delete_hook')
    def test_delete_hook_error(self, mock_delete_hook):
        # A generic GitHub API error is reported as failure, not raised.
        self.node_settings.hook_id = 'hook'
        self.node_settings.save()
        mock_delete_hook.side_effect = GitHubError(mock.Mock())
        args = (
            self.node_settings.user,
            self.node_settings.repo,
            self.node_settings.hook_id,
        )
        res = self.node_settings.delete_hook()
        assert_false(res)
        mock_delete_hook.assert_called_with(*args)

    def test_to_json_noauthorizing_authed_user(self):
        # Smoke test: serializing for a user who has their own GitHub auth
        # but is not the node's authorizer must not raise.
        user = UserFactory()
        user.add_addon('github')
        user_settings = user.get_addon('github')
        oauth_settings = AddonGitHubOauthSettings(oauth_access_token='foobar')
        oauth_settings.github_user_id = 'testuser'
        oauth_settings.save()
        # BUGFIX: previously assigned self.oauth_settings (the authorizer's
        # record), leaving the freshly created oauth_settings unused and the
        # new user sharing the node authorizer's credentials.
        user_settings.oauth_settings = oauth_settings
        user_settings.save()
        self.node_settings.to_json(user)
|
|
"""Make the custom certificate and private key files used by test_ssl
and friends.

NOTE: this region previously contained an unresolved git merge conflict
(<<<<<<< / ======= / >>>>>>>) wrapping three byte-identical copies of this
script; the conflict is resolved here by keeping a single copy.
"""
import os
import shutil
import sys
import tempfile
from subprocess import check_call

# openssl config template; {hostname} is filled in per certificate.
req_template = """
[req]
distinguished_name = req_distinguished_name
x509_extensions = req_x509_extensions
prompt = no
[req_distinguished_name]
C = XY
L = Castle Anthrax
O = Python Software Foundation
CN = {hostname}
[req_x509_extensions]
subjectAltName = DNS:{hostname}
[ ca ]
default_ca = CA_default
[ CA_default ]
dir = cadir
database = $dir/index.txt
crlnumber = $dir/crl.txt
default_md = sha1
default_days = 3600
default_crl_days = 3600
certificate = pycacert.pem
private_key = pycakey.pem
serial = $dir/serial
RANDFILE = $dir/.rand
policy = policy_match
[ policy_match ]
countryName = match
stateOrProvinceName = optional
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ policy_anything ]
countryName = optional
stateOrProvinceName = optional
localityName = optional
organizationName = optional
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ v3_ca ]
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid:always,issuer
basicConstraints = CA:true
"""

here = os.path.abspath(os.path.dirname(__file__))


def make_cert_key(hostname, sign=False):
    """Create an RSA key and certificate for *hostname*.

    If *sign* is true, generate a CSR and have the local test CA (see
    make_ca) sign it; otherwise produce a self-signed certificate.
    Returns (cert_pem, key_pem) as strings. Temp files are always removed.
    """
    print("creating cert for " + hostname)
    tempnames = []
    for i in range(3):
        with tempfile.NamedTemporaryFile(delete=False) as f:
            tempnames.append(f.name)
    req_file, cert_file, key_file = tempnames
    try:
        with open(req_file, 'w') as f:
            f.write(req_template.format(hostname=hostname))
        args = ['req', '-new', '-days', '3650', '-nodes',
                '-newkey', 'rsa:1024', '-keyout', key_file,
                '-config', req_file]
        if sign:
            # Write the CSR to an extra temp file so the CA can sign it.
            with tempfile.NamedTemporaryFile(delete=False) as f:
                tempnames.append(f.name)
                reqfile = f.name
            args += ['-out', reqfile]
        else:
            args += ['-x509', '-out', cert_file]
        check_call(['openssl'] + args)
        if sign:
            args = ['ca', '-config', req_file, '-out', cert_file, '-outdir', 'cadir',
                    '-policy', 'policy_anything', '-batch', '-infiles', reqfile]
            check_call(['openssl'] + args)
        with open(cert_file, 'r') as f:
            cert = f.read()
        with open(key_file, 'r') as f:
            key = f.read()
        return cert, key
    finally:
        for name in tempnames:
            os.remove(name)


TMP_CADIR = 'cadir'


def unmake_ca():
    """Remove the temporary CA directory created by make_ca()."""
    shutil.rmtree(TMP_CADIR)


def make_ca():
    """Create a throwaway CA (pycacert.pem / pycakey.pem) plus its
    bookkeeping files and an empty revocation list."""
    os.mkdir(TMP_CADIR)
    with open(os.path.join('cadir', 'index.txt'), 'a+') as f:
        pass  # empty file
    with open(os.path.join('cadir', 'crl.txt'), 'a+') as f:
        f.write("00")
    with open(os.path.join('cadir', 'index.txt.attr'), 'w+') as f:
        f.write('unique_subject = no')
    with tempfile.NamedTemporaryFile("w") as t:
        t.write(req_template.format(hostname='our-ca-server'))
        t.flush()
        with tempfile.NamedTemporaryFile() as f:
            # CSR for the CA certificate itself.
            args = ['req', '-new', '-days', '3650', '-extensions', 'v3_ca', '-nodes',
                    '-newkey', 'rsa:2048', '-keyout', 'pycakey.pem',
                    '-out', f.name,
                    '-subj', '/C=XY/L=Castle Anthrax/O=Python Software Foundation CA/CN=our-ca-server']
            check_call(['openssl'] + args)
            # Self-sign the CA certificate.
            args = ['ca', '-config', t.name, '-create_serial',
                    '-out', 'pycacert.pem', '-batch', '-outdir', TMP_CADIR,
                    '-keyfile', 'pycakey.pem', '-days', '3650',
                    '-selfsign', '-extensions', 'v3_ca', '-infiles', f.name]
            check_call(['openssl'] + args)
            # Emit an (empty) certificate revocation list.
            args = ['ca', '-config', t.name, '-gencrl', '-out', 'revocation.crl']
            check_call(['openssl'] + args)


if __name__ == '__main__':
    os.chdir(here)
    cert, key = make_cert_key('localhost')
    with open('ssl_cert.pem', 'w') as f:
        f.write(cert)
    with open('ssl_key.pem', 'w') as f:
        f.write(key)
    print("password protecting ssl_key.pem in ssl_key.passwd.pem")
    check_call(['openssl', 'rsa', '-in', 'ssl_key.pem', '-out', 'ssl_key.passwd.pem', '-des3', '-passout', 'pass:somepass'])
    check_call(['openssl', 'rsa', '-in', 'ssl_key.pem', '-out', 'keycert.passwd.pem', '-des3', '-passout', 'pass:somepass'])
    with open('keycert.pem', 'w') as f:
        f.write(key)
        f.write(cert)
    with open('keycert.passwd.pem', 'a+') as f:
        f.write(cert)
    # For certificate matching tests
    make_ca()
    cert, key = make_cert_key('fakehostname')
    with open('keycert2.pem', 'w') as f:
        f.write(key)
        f.write(cert)
    cert, key = make_cert_key('localhost', True)
    with open('keycert3.pem', 'w') as f:
        f.write(key)
        f.write(cert)
    cert, key = make_cert_key('fakehostname', True)
    with open('keycert4.pem', 'w') as f:
        f.write(key)
        f.write(cert)
    unmake_ca()
    print("\n\nPlease change the values in test_ssl.py, test_parse_cert function related to notAfter,notBefore and serialNumber")
    check_call(['openssl', 'x509', '-in', 'keycert.pem', '-dates', '-serial', '-noout'])
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Pooling layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras.engine import InputSpec
from tensorflow.python.keras._impl.keras.engine import Layer
from tensorflow.python.keras._impl.keras.utils import conv_utils
from tensorflow.python.layers import pooling as tf_pooling_layers
class MaxPooling1D(tf_pooling_layers.MaxPooling1D, Layer):
  """Max pooling operation for temporal data.

  Arguments:
      pool_size: Integer, size of the max pooling windows.
      strides: Integer, or None. Downscaling factor (e.g. 2 halves the
        input). Defaults to `pool_size` when None.
      padding: One of `"valid"` or `"same"` (case-insensitive).

  Input shape:
      3D tensor with shape: `(batch_size, steps, features)`.

  Output shape:
      3D tensor with shape: `(batch_size, downsampled_steps, features)`.
  """

  def __init__(self, pool_size=2, strides=None, padding='valid', **kwargs):
    # None strides default to the pooling window size.
    effective_strides = pool_size if strides is None else strides
    super(MaxPooling1D, self).__init__(pool_size, effective_strides, padding,
                                       **kwargs)

  def get_config(self):
    # Merge this layer's fields over the base layer's config.
    merged = dict(super(MaxPooling1D, self).get_config())
    merged.update({
        'strides': self.strides,
        'pool_size': self.pool_size,
        'padding': self.padding
    })
    return merged
class AveragePooling1D(tf_pooling_layers.AveragePooling1D, Layer):
  """Average pooling for temporal data.

  Arguments:
      pool_size: Integer, size of the max pooling windows.
      strides: Integer, or None. Downscaling factor (e.g. 2 halves the
        input). Defaults to `pool_size` when None.
      padding: One of `"valid"` or `"same"` (case-insensitive).

  Input shape:
      3D tensor with shape: `(batch_size, steps, features)`.

  Output shape:
      3D tensor with shape: `(batch_size, downsampled_steps, features)`.
  """

  def __init__(self, pool_size=2, strides=None, padding='valid', **kwargs):
    # None strides default to the pooling window size.
    effective_strides = pool_size if strides is None else strides
    super(AveragePooling1D, self).__init__(pool_size, effective_strides,
                                           padding, **kwargs)

  def get_config(self):
    # Merge this layer's fields over the base layer's config.
    merged = dict(super(AveragePooling1D, self).get_config())
    merged.update({
        'strides': self.strides,
        'pool_size': self.pool_size,
        'padding': self.padding
    })
    return merged
class MaxPooling2D(tf_pooling_layers.MaxPooling2D, Layer):
  """Max pooling operation for spatial data.

  Arguments:
      pool_size: integer or tuple of 2 integers, downscaling factors
        (vertical, horizontal). (2, 2) halves the input in both spatial
        dimensions; a single integer is used for both.
      strides: Integer, tuple of 2 integers, or None. Strides values.
        Defaults to `pool_size` when None.
      padding: One of `"valid"` or `"same"` (case-insensitive).
      data_format: A string, one of `channels_last` (default) or
        `channels_first`. `channels_last` corresponds to inputs with shape
        `(batch, height, width, channels)`; `channels_first` to
        `(batch, channels, height, width)`. Defaults to the
        `image_data_format` value in `~/.keras/keras.json`, falling back
        to "channels_last" if never set.

  Input shape:
      - If `data_format='channels_last'`:
          4D tensor with shape: `(batch_size, rows, cols, channels)`
      - If `data_format='channels_first'`:
          4D tensor with shape: `(batch_size, channels, rows, cols)`

  Output shape:
      - If `data_format='channels_last'`:
          4D tensor with shape:
          `(batch_size, pooled_rows, pooled_cols, channels)`
      - If `data_format='channels_first'`:
          4D tensor with shape:
          `(batch_size, channels, pooled_rows, pooled_cols)`
  """

  def __init__(self,
               pool_size=(2, 2),
               strides=None,
               padding='valid',
               data_format=None,
               **kwargs):
    # Resolve defaults: backend image format and window-sized strides.
    fmt = K.image_data_format() if data_format is None else data_format
    effective_strides = pool_size if strides is None else strides
    super(MaxPooling2D, self).__init__(pool_size, effective_strides, padding,
                                       fmt, **kwargs)

  def get_config(self):
    # Merge this layer's fields over the base layer's config.
    merged = dict(super(MaxPooling2D, self).get_config())
    merged.update({
        'pool_size': self.pool_size,
        'padding': self.padding,
        'strides': self.strides,
        'data_format': self.data_format
    })
    return merged
class AveragePooling2D(tf_pooling_layers.AveragePooling2D, Layer):
  """Average pooling operation for spatial data.

  Arguments:
      pool_size: integer or tuple of 2 integers, downscaling factors
        (vertical, horizontal). (2, 2) halves the input in both spatial
        dimensions; a single integer is used for both.
      strides: Integer, tuple of 2 integers, or None. Strides values.
        Defaults to `pool_size` when None.
      padding: One of `"valid"` or `"same"` (case-insensitive).
      data_format: A string, one of `channels_last` (default) or
        `channels_first`. `channels_last` corresponds to inputs with shape
        `(batch, height, width, channels)`; `channels_first` to
        `(batch, channels, height, width)`. Defaults to the
        `image_data_format` value in `~/.keras/keras.json`, falling back
        to "channels_last" if never set.

  Input shape:
      - If `data_format='channels_last'`:
          4D tensor with shape: `(batch_size, rows, cols, channels)`
      - If `data_format='channels_first'`:
          4D tensor with shape: `(batch_size, channels, rows, cols)`

  Output shape:
      - If `data_format='channels_last'`:
          4D tensor with shape:
          `(batch_size, pooled_rows, pooled_cols, channels)`
      - If `data_format='channels_first'`:
          4D tensor with shape:
          `(batch_size, channels, pooled_rows, pooled_cols)`
  """

  def __init__(self,
               pool_size=(2, 2),
               strides=None,
               padding='valid',
               data_format=None,
               **kwargs):
    # Resolve defaults: backend image format and window-sized strides.
    fmt = K.image_data_format() if data_format is None else data_format
    effective_strides = pool_size if strides is None else strides
    super(AveragePooling2D, self).__init__(pool_size, effective_strides,
                                           padding, fmt, **kwargs)

  def get_config(self):
    # Merge this layer's fields over the base layer's config.
    merged = dict(super(AveragePooling2D, self).get_config())
    merged.update({
        'pool_size': self.pool_size,
        'padding': self.padding,
        'strides': self.strides,
        'data_format': self.data_format
    })
    return merged
class MaxPooling3D(tf_pooling_layers.MaxPooling3D, Layer):
  """Max pooling operation for 3D data (spatial or spatio-temporal).

  Arguments:
      pool_size: tuple of 3 integers, downscaling factors
        (dim1, dim2, dim3). (2, 2, 2) halves the 3D input in each
        dimension.
      strides: tuple of 3 integers, or None. Strides values.
      padding: One of `"valid"` or `"same"` (case-insensitive).
      data_format: A string, one of `channels_last` (default) or
        `channels_first`. `channels_last` corresponds to inputs with shape
        `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`;
        `channels_first` to
        `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
        Defaults to the `image_data_format` value in
        `~/.keras/keras.json`, falling back to "channels_last" if never
        set.

  Input shape:
      - If `data_format='channels_last'`:
          5D tensor with shape:
          `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
      - If `data_format='channels_first'`:
          5D tensor with shape:
          `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`

  Output shape:
      - If `data_format='channels_last'`:
          5D tensor with shape:
          `(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
      - If `data_format='channels_first'`:
          5D tensor with shape:
          `(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`
  """

  def __init__(self,
               pool_size=(2, 2, 2),
               strides=None,
               padding='valid',
               data_format=None,
               **kwargs):
    # Resolve defaults: backend image format and window-sized strides.
    fmt = K.image_data_format() if data_format is None else data_format
    effective_strides = pool_size if strides is None else strides
    super(MaxPooling3D, self).__init__(pool_size, effective_strides, padding,
                                       fmt, **kwargs)

  def get_config(self):
    # Merge this layer's fields over the base layer's config.
    merged = dict(super(MaxPooling3D, self).get_config())
    merged.update({
        'pool_size': self.pool_size,
        'padding': self.padding,
        'strides': self.strides,
        'data_format': self.data_format
    })
    return merged
class AveragePooling3D(tf_pooling_layers.AveragePooling3D, Layer):
  """Average pooling operation for 3D data (spatial or spatio-temporal).
  Arguments:
      pool_size: tuple of 3 integers,
          factors by which to downscale (dim1, dim2, dim3).
          (2, 2, 2) will halve the size of the 3D input in each dimension.
      strides: tuple of 3 integers, or None. Strides values.
      padding: One of `"valid"` or `"same"` (case-insensitive).
      data_format: A string,
          one of `channels_last` (default) or `channels_first`.
          The ordering of the dimensions in the inputs.
          `channels_last` corresponds to inputs with shape
          `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
          while `channels_first` corresponds to inputs with shape
          `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
          It defaults to the `image_data_format` value found in your
          Keras config file at `~/.keras/keras.json`.
          If you never set it, then it will be "channels_last".
  Input shape:
      - If `data_format='channels_last'`:
          5D tensor with shape:
          `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
      - If `data_format='channels_first'`:
          5D tensor with shape:
          `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
  Output shape:
      - If `data_format='channels_last'`:
          5D tensor with shape:
          `(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
      - If `data_format='channels_first'`:
          5D tensor with shape:
          `(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`
  """

  def __init__(self,
               pool_size=(2, 2, 2),
               strides=None,
               padding='valid',
               data_format=None,
               **kwargs):
    # Strides default to the pool size; data format defaults to the
    # globally configured Keras image data format.
    super(AveragePooling3D, self).__init__(
        pool_size,
        pool_size if strides is None else strides,
        padding,
        K.image_data_format() if data_format is None else data_format,
        **kwargs)

  def get_config(self):
    """Return the layer configuration dict used for (de)serialization."""
    config = dict(super(AveragePooling3D, self).get_config())
    config.update(
        pool_size=self.pool_size,
        padding=self.padding,
        strides=self.strides,
        data_format=self.data_format)
    return config
class _GlobalPooling1D(Layer):
  """Abstract class for different global pooling 1D layers.
  """

  def __init__(self, **kwargs):
    super(_GlobalPooling1D, self).__init__(**kwargs)
    # Inputs are (batch, steps, features).
    self.input_spec = InputSpec(ndim=3)

  def _compute_output_shape(self, input_shape):
    # Global pooling collapses the steps axis: (batch, features) remains.
    shape = tensor_shape.TensorShape(input_shape).as_list()
    return tensor_shape.TensorShape([shape[0], shape[2]])

  def call(self, inputs):
    raise NotImplementedError
class GlobalAveragePooling1D(_GlobalPooling1D):
  """Global average pooling operation for temporal data.
  Input shape:
      3D tensor with shape: `(batch_size, steps, features)`.
  Output shape:
      2D tensor with shape:
      `(batch_size, channels)`
  """

  def call(self, inputs):
    # Average over axis 1 (the steps/time axis), leaving (batch, features).
    return K.mean(inputs, axis=1)
class GlobalMaxPooling1D(_GlobalPooling1D):
  """Global max pooling operation for temporal data.
  Input shape:
      3D tensor with shape: `(batch_size, steps, features)`.
  Output shape:
      2D tensor with shape:
      `(batch_size, channels)`
  """

  def call(self, inputs):
    # Maximum over axis 1 (the steps/time axis), leaving (batch, features).
    return K.max(inputs, axis=1)
class _GlobalPooling2D(Layer):
  """Abstract class for different global pooling 2D layers.
  """

  def __init__(self, data_format=None, **kwargs):
    super(_GlobalPooling2D, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    # Inputs are 4D: (batch, rows, cols, channels) or channels-first.
    self.input_spec = InputSpec(ndim=4)

  def _compute_output_shape(self, input_shape):
    # Output keeps only (batch, channels); the channel axis position
    # depends on the data format.
    shape = tensor_shape.TensorShape(input_shape).as_list()
    channel_axis = 3 if self.data_format == 'channels_last' else 1
    return tensor_shape.TensorShape([shape[0], shape[channel_axis]])

  def call(self, inputs):
    raise NotImplementedError

  def get_config(self):
    """Return the layer configuration dict used for (de)serialization."""
    config = dict(super(_GlobalPooling2D, self).get_config())
    config['data_format'] = self.data_format
    return config
class GlobalAveragePooling2D(_GlobalPooling2D):
  """Global average pooling operation for spatial data.
  Arguments:
      data_format: A string,
          one of `channels_last` (default) or `channels_first`.
          The ordering of the dimensions in the inputs.
          `channels_last` corresponds to inputs with shape
          `(batch, height, width, channels)` while `channels_first`
          corresponds to inputs with shape
          `(batch, channels, height, width)`.
          It defaults to the `image_data_format` value found in your
          Keras config file at `~/.keras/keras.json`.
          If you never set it, then it will be "channels_last".
  Input shape:
      - If `data_format='channels_last'`:
          4D tensor with shape:
          `(batch_size, rows, cols, channels)`
      - If `data_format='channels_first'`:
          4D tensor with shape:
          `(batch_size, channels, rows, cols)`
  Output shape:
      2D tensor with shape:
      `(batch_size, channels)`
  """

  def call(self, inputs):
    # Spatial axes are (1, 2) for channels_last and (2, 3) for
    # channels_first; average over them, keeping (batch, channels).
    reduction_axes = [1, 2] if self.data_format == 'channels_last' else [2, 3]
    return K.mean(inputs, axis=reduction_axes)
class GlobalMaxPooling2D(_GlobalPooling2D):
  """Global max pooling operation for spatial data.
  Arguments:
      data_format: A string,
          one of `channels_last` (default) or `channels_first`.
          The ordering of the dimensions in the inputs.
          `channels_last` corresponds to inputs with shape
          `(batch, height, width, channels)` while `channels_first`
          corresponds to inputs with shape
          `(batch, channels, height, width)`.
          It defaults to the `image_data_format` value found in your
          Keras config file at `~/.keras/keras.json`.
          If you never set it, then it will be "channels_last".
  Input shape:
      - If `data_format='channels_last'`:
          4D tensor with shape:
          `(batch_size, rows, cols, channels)`
      - If `data_format='channels_first'`:
          4D tensor with shape:
          `(batch_size, channels, rows, cols)`
  Output shape:
      2D tensor with shape:
      `(batch_size, channels)`
  """

  def call(self, inputs):
    # Spatial axes are (1, 2) for channels_last and (2, 3) for
    # channels_first; take the maximum over them.
    reduction_axes = [1, 2] if self.data_format == 'channels_last' else [2, 3]
    return K.max(inputs, axis=reduction_axes)
class _GlobalPooling3D(Layer):
  """Abstract class for different global pooling 3D layers.
  """

  def __init__(self, data_format=None, **kwargs):
    super(_GlobalPooling3D, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    # Inputs are 5D: (batch, d1, d2, d3, channels) or channels-first.
    self.input_spec = InputSpec(ndim=5)

  def _compute_output_shape(self, input_shape):
    # Output keeps only (batch, channels); the channel axis position
    # depends on the data format.
    shape = tensor_shape.TensorShape(input_shape).as_list()
    channel_axis = 4 if self.data_format == 'channels_last' else 1
    return tensor_shape.TensorShape([shape[0], shape[channel_axis]])

  def call(self, inputs):
    raise NotImplementedError

  def get_config(self):
    """Return the layer configuration dict used for (de)serialization."""
    config = dict(super(_GlobalPooling3D, self).get_config())
    config['data_format'] = self.data_format
    return config
class GlobalAveragePooling3D(_GlobalPooling3D):
  """Global Average pooling operation for 3D data.
  Arguments:
      data_format: A string,
          one of `channels_last` (default) or `channels_first`.
          The ordering of the dimensions in the inputs.
          `channels_last` corresponds to inputs with shape
          `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
          while `channels_first` corresponds to inputs with shape
          `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
          It defaults to the `image_data_format` value found in your
          Keras config file at `~/.keras/keras.json`.
          If you never set it, then it will be "channels_last".
  Input shape:
      - If `data_format='channels_last'`:
          5D tensor with shape:
          `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
      - If `data_format='channels_first'`:
          5D tensor with shape:
          `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
  Output shape:
      2D tensor with shape:
      `(batch_size, channels)`
  """

  def call(self, inputs):
    # Spatial axes are (1, 2, 3) for channels_last and (2, 3, 4) for
    # channels_first; average over them, keeping (batch, channels).
    axes = [1, 2, 3] if self.data_format == 'channels_last' else [2, 3, 4]
    return K.mean(inputs, axis=axes)
class GlobalMaxPooling3D(_GlobalPooling3D):
  """Global Max pooling operation for 3D data.
  Arguments:
      data_format: A string,
          one of `channels_last` (default) or `channels_first`.
          The ordering of the dimensions in the inputs.
          `channels_last` corresponds to inputs with shape
          `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
          while `channels_first` corresponds to inputs with shape
          `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
          It defaults to the `image_data_format` value found in your
          Keras config file at `~/.keras/keras.json`.
          If you never set it, then it will be "channels_last".
  Input shape:
      - If `data_format='channels_last'`:
          5D tensor with shape:
          `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
      - If `data_format='channels_first'`:
          5D tensor with shape:
          `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
  Output shape:
      2D tensor with shape:
      `(batch_size, channels)`
  """

  def call(self, inputs):
    # Spatial axes are (1, 2, 3) for channels_last and (2, 3, 4) for
    # channels_first; take the maximum over them.
    axes = [1, 2, 3] if self.data_format == 'channels_last' else [2, 3, 4]
    return K.max(inputs, axis=axes)
# Aliases
# Short-form names mirroring the tf.layers naming convention; both names
# refer to the same class objects.
AvgPool1D = AveragePooling1D
MaxPool1D = MaxPooling1D
AvgPool2D = AveragePooling2D
MaxPool2D = MaxPooling2D
AvgPool3D = AveragePooling3D
MaxPool3D = MaxPooling3D
GlobalMaxPool1D = GlobalMaxPooling1D
GlobalMaxPool2D = GlobalMaxPooling2D
GlobalMaxPool3D = GlobalMaxPooling3D
GlobalAvgPool1D = GlobalAveragePooling1D
GlobalAvgPool2D = GlobalAveragePooling2D
GlobalAvgPool3D = GlobalAveragePooling3D
|
|
"""Platform for retrieving meteorological data from Environment Canada."""
import datetime
import re
from env_canada import ECData # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.weather import (
ATTR_CONDITION_CLEAR_NIGHT,
ATTR_CONDITION_CLOUDY,
ATTR_CONDITION_FOG,
ATTR_CONDITION_HAIL,
ATTR_CONDITION_LIGHTNING_RAINY,
ATTR_CONDITION_PARTLYCLOUDY,
ATTR_CONDITION_POURING,
ATTR_CONDITION_RAINY,
ATTR_CONDITION_SNOWY,
ATTR_CONDITION_SNOWY_RAINY,
ATTR_CONDITION_SUNNY,
ATTR_CONDITION_WINDY,
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_PRECIPITATION_PROBABILITY,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
PLATFORM_SCHEMA,
WeatherEntity,
)
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, TEMP_CELSIUS
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt
# Configuration key selecting daily vs hourly forecast mode.
CONF_FORECAST = "forecast"
# Attribution text exposed by the weather entity.
CONF_ATTRIBUTION = "Data provided by Environment Canada"
# Configuration key for an explicit Environment Canada station ID.
CONF_STATION = "station"
def validate_station(station):
    """Check that the station ID is well-formed."""
    if station is None:
        return None
    if re.fullmatch(r"[A-Z]{2}/s0000\d{3}", station):
        return station
    raise vol.error.Invalid('Station ID must be of the form "XX/s0000###"')
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_NAME): cv.string,
        vol.Optional(CONF_STATION): validate_station,
        # Latitude and longitude must be provided together, or not at all.
        vol.Inclusive(CONF_LATITUDE, "latlon"): cv.latitude,
        vol.Inclusive(CONF_LONGITUDE, "latlon"): cv.longitude,
        vol.Optional(CONF_FORECAST, default="daily"): vol.In(["daily", "hourly"]),
    }
)
# Icon codes from http://dd.weatheroffice.ec.gc.ca/citypage_weather/
# docs/current_conditions_icon_code_descriptions_e.csv
# Maps Home Assistant condition constants to lists of Environment Canada
# icon codes. Note codes 26 and 27 appear under both snowy/snowy-rainy and
# hail; lookup order in icon_code_to_condition decides which wins.
ICON_CONDITION_MAP = {
    ATTR_CONDITION_SUNNY: [0, 1],
    ATTR_CONDITION_CLEAR_NIGHT: [30, 31],
    ATTR_CONDITION_PARTLYCLOUDY: [2, 3, 4, 5, 22, 32, 33, 34, 35],
    ATTR_CONDITION_CLOUDY: [10],
    ATTR_CONDITION_RAINY: [6, 9, 11, 12, 28, 36],
    ATTR_CONDITION_LIGHTNING_RAINY: [19, 39, 46, 47],
    ATTR_CONDITION_POURING: [13],
    ATTR_CONDITION_SNOWY_RAINY: [7, 14, 15, 27, 37],
    ATTR_CONDITION_SNOWY: [8, 16, 17, 18, 25, 26, 38, 40],
    ATTR_CONDITION_WINDY: [43],
    ATTR_CONDITION_FOG: [20, 21, 23, 24, 44],
    ATTR_CONDITION_HAIL: [26, 27],
}
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Environment Canada weather."""
    station = config.get(CONF_STATION)
    if station:
        ec_data = ECData(station_id=station)
    else:
        # Fall back to the Home Assistant instance coordinates.
        coordinates = (
            config.get(CONF_LATITUDE, hass.config.latitude),
            config.get(CONF_LONGITUDE, hass.config.longitude),
        )
        ec_data = ECData(coordinates=coordinates)
    add_devices([ECWeather(ec_data, config)])
class ECWeather(WeatherEntity):
    """Representation of a weather condition."""

    def __init__(self, ec_data, config):
        """Initialize Environment Canada weather."""
        self.ec_data = ec_data
        self.platform_name = config.get(CONF_NAME)
        self.forecast_type = config[CONF_FORECAST]

    def _condition_float(self, key):
        """Return current conditions[key]['value'] as float, or None.

        Missing keys and falsy values (empty string) both map to None,
        matching the repeated guard previously inlined in each property.
        """
        value = self.ec_data.conditions.get(key, {}).get("value")
        if value:
            return float(value)
        return None

    @property
    def attribution(self):
        """Return the attribution."""
        return CONF_ATTRIBUTION

    @property
    def name(self):
        """Return the name of the weather entity."""
        if self.platform_name:
            return self.platform_name
        return self.ec_data.metadata.get("location")

    @property
    def temperature(self):
        """Return the temperature."""
        temperature = self._condition_float("temperature")
        if temperature is not None:
            return temperature
        # Fall back to the first hourly forecast when no observation exists.
        if self.ec_data.hourly_forecasts[0].get("temperature"):
            return float(self.ec_data.hourly_forecasts[0]["temperature"])
        return None

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS

    @property
    def humidity(self):
        """Return the humidity."""
        return self._condition_float("humidity")

    @property
    def wind_speed(self):
        """Return the wind speed."""
        return self._condition_float("wind_speed")

    @property
    def wind_bearing(self):
        """Return the wind bearing."""
        return self._condition_float("wind_bearing")

    @property
    def pressure(self):
        """Return the pressure."""
        pressure = self._condition_float("pressure")
        if pressure is None:
            return None
        # Environment Canada reports kPa; Home Assistant expects hPa.
        # NOTE(review): unit conversion assumed from the x10 factor - confirm.
        return 10 * pressure

    @property
    def visibility(self):
        """Return the visibility."""
        return self._condition_float("visibility")

    @property
    def condition(self):
        """Return the weather condition."""
        icon_code = None
        if self.ec_data.conditions.get("icon_code", {}).get("value"):
            icon_code = self.ec_data.conditions["icon_code"]["value"]
        elif self.ec_data.hourly_forecasts[0].get("icon_code"):
            icon_code = self.ec_data.hourly_forecasts[0]["icon_code"]
        if icon_code:
            return icon_code_to_condition(int(icon_code))
        return ""

    @property
    def forecast(self):
        """Return the forecast array."""
        return get_forecast(self.ec_data, self.forecast_type)

    def update(self):
        """Get the latest data from Environment Canada."""
        self.ec_data.update()
def get_forecast(ec_data, forecast_type):
    """Build the forecast array.

    ``forecast_type`` selects "daily" or "hourly". Daily data arrives as
    half-day entries; whether an entry carries the day's high or the
    night's low is indicated by its "temperature_class" field.
    """
    forecast_array = []
    if forecast_type == "daily":
        half_days = ec_data.daily_forecasts
        # Today's entry is built from the first one or two half-days.
        today = {
            ATTR_FORECAST_TIME: dt.now().isoformat(),
            ATTR_FORECAST_CONDITION: icon_code_to_condition(
                int(half_days[0]["icon_code"])
            ),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: int(
                half_days[0]["precip_probability"]
            ),
        }
        # half_days[0] is either today's high (daytime) or tonight's low
        # (evening); map the pair [0], [1] onto temp/templow accordingly.
        if half_days[0]["temperature_class"] == "high":
            today.update(
                {
                    ATTR_FORECAST_TEMP: int(half_days[0]["temperature"]),
                    ATTR_FORECAST_TEMP_LOW: int(half_days[1]["temperature"]),
                }
            )
        else:
            today.update(
                {
                    ATTR_FORECAST_TEMP_LOW: int(half_days[0]["temperature"]),
                    ATTR_FORECAST_TEMP: int(half_days[1]["temperature"]),
                }
            )
        forecast_array.append(today)
        half_days = half_days[2:]
        # Five more days: even indices hold the daytime high half-day,
        # odd indices the following night's low half-day.
        # NOTE(review): assumes at least 12 half-day entries - confirm.
        for day, high, low in zip(range(1, 6), range(0, 9, 2), range(1, 10, 2)):
            forecast_array.append(
                {
                    ATTR_FORECAST_TIME: (
                        dt.now() + datetime.timedelta(days=day)
                    ).isoformat(),
                    ATTR_FORECAST_TEMP: int(half_days[high]["temperature"]),
                    ATTR_FORECAST_TEMP_LOW: int(half_days[low]["temperature"]),
                    ATTR_FORECAST_CONDITION: icon_code_to_condition(
                        int(half_days[high]["icon_code"])
                    ),
                    ATTR_FORECAST_PRECIPITATION_PROBABILITY: int(
                        half_days[high]["precip_probability"]
                    ),
                }
            )
    elif forecast_type == "hourly":
        hours = ec_data.hourly_forecasts
        # Next 24 hours; "period" is a YYYYmmddHHMM timestamp parsed naive
        # and localized via dt.as_local.
        for hour in range(0, 24):
            forecast_array.append(
                {
                    ATTR_FORECAST_TIME: dt.as_local(
                        datetime.datetime.strptime(hours[hour]["period"], "%Y%m%d%H%M")
                    ).isoformat(),
                    ATTR_FORECAST_TEMP: int(hours[hour]["temperature"]),
                    ATTR_FORECAST_CONDITION: icon_code_to_condition(
                        int(hours[hour]["icon_code"])
                    ),
                    ATTR_FORECAST_PRECIPITATION_PROBABILITY: int(
                        hours[hour]["precip_probability"]
                    ),
                }
            )
    return forecast_array
def icon_code_to_condition(icon_code):
    """Return the condition corresponding to an icon code."""
    # First mapping entry containing the code wins; None when unmapped.
    return next(
        (
            condition
            for condition, codes in ICON_CONDITION_MAP.items()
            if icon_code in codes
        ),
        None,
    )
|
|
# -*- coding: utf-8 -*-
"""
celery.app.utils
~~~~~~~~~~~~~~~~
App utilities: Compat settings, bugreport tool, pickling apps.
"""
from __future__ import absolute_import
import os
import platform as _platform
import re
from collections import Mapping
from types import ModuleType
from celery.datastructures import ConfigurationView
from celery.five import items, string_t, values
from celery.platforms import pyimplementation
from celery.utils.text import pretty
from celery.utils.imports import import_from_cwd, symbol_by_name, qualname
from .defaults import find
# Public API of this module.
__all__ = ['Settings', 'appstr', 'bugreport',
           'filter_hidden_settings', 'find_app']
#: Format used to generate bugreport information.
BUGREPORT_INFO = """
software -> celery:{celery_v} kombu:{kombu_v} py:{py_v}
billiard:{billiard_v} {driver_v}
platform -> system:{system} arch:{arch} imp:{py_i}
loader -> {loader}
settings -> transport:{transport} results:{results}
{human_settings}
"""
#: Setting names matching this pattern are masked by
#: :func:`filter_hidden_settings` (likely secrets/credentials).
HIDDEN_SETTINGS = re.compile(
    'API|TOKEN|KEY|SECRET|PASS|PROFANITIES_LIST|SIGNATURE|DATABASE',
    re.IGNORECASE,
)
def appstr(app):
    """String used in __repr__ etc, to id app instances."""
    name = app.main or '__main__'
    return '{0}:0x{1:x}'.format(name, id(app))
class Settings(ConfigurationView):
    """Celery settings object.
    .. seealso:
        :ref:`configuration` for a full list of configuration keys.
    """

    @property
    def CELERY_RESULT_BACKEND(self):
        # Falls back to the legacy CELERY_BACKEND alias.
        return self.first('CELERY_RESULT_BACKEND', 'CELERY_BACKEND')

    @property
    def BROKER_TRANSPORT(self):
        # Supports legacy aliases (BROKER_BACKEND, carrot-era
        # CARROT_BACKEND); first set value wins.
        return self.first('BROKER_TRANSPORT',
                          'BROKER_BACKEND', 'CARROT_BACKEND')

    @property
    def BROKER_BACKEND(self):
        """Deprecated compat alias to :attr:`BROKER_TRANSPORT`."""
        return self.BROKER_TRANSPORT

    @property
    def BROKER_URL(self):
        # The CELERY_BROKER_URL environment variable takes precedence
        # over any configured value.
        return (os.environ.get('CELERY_BROKER_URL') or
                self.first('BROKER_URL', 'BROKER_HOST'))

    @property
    def CELERY_TIMEZONE(self):
        # this way we also support django's time zone.
        return self.first('CELERY_TIMEZONE', 'TIME_ZONE')

    def without_defaults(self):
        """Return the current configuration, but without defaults."""
        # the last stash is the default settings, so just skip that
        return Settings({}, self._order[:-1])

    def value_set_for(self, key):
        # True only if the key was explicitly set (not just a default).
        return key in self.without_defaults()

    def find_option(self, name, namespace='celery'):
        """Search for option by name.
        Will return ``(namespace, key, type)`` tuple, e.g.::
            >>> from proj.celery import app
            >>> app.conf.find_option('disable_rate_limits')
            ('CELERY', 'DISABLE_RATE_LIMITS',
             <Option: type->bool default->False>))
        :param name: Name of option, cannot be partial.
        :keyword namespace: Preferred namespace (``CELERY`` by default).
        """
        return find(name, namespace)

    def find_value_for_key(self, name, namespace='celery'):
        """Shortcut to ``get_by_parts(*find_option(name)[:-1])``"""
        return self.get_by_parts(*self.find_option(name, namespace)[:-1])

    def get_by_parts(self, *parts):
        """Return the current value for setting specified as a path.
        Example::
            >>> from proj.celery import app
            >>> app.conf.get_by_parts('CELERY', 'DISABLE_RATE_LIMITS')
            False
        """
        # Empty parts are dropped, so ('CELERY', '', 'X') -> 'CELERY_X'.
        return self['_'.join(part for part in parts if part)]

    def table(self, with_defaults=False, censored=True):
        # Only upper-case, non-private keys are listed; when censored,
        # filter_hidden_settings masks secret-looking values.
        filt = filter_hidden_settings if censored else lambda v: v
        return filt(dict(
            (k, v) for k, v in items(
                self if with_defaults else self.without_defaults())
            if k.isupper() and not k.startswith('_')
        ))

    def humanize(self, with_defaults=False, censored=True):
        """Return a human readable string showing changes to the
        configuration."""
        return '\n'.join(
            '{0}: {1}'.format(key, pretty(value, width=50))
            for key, value in items(self.table(with_defaults, censored)))
class AppPickler(object):
    """Old application pickler/unpickler (< 3.1)."""

    def __call__(self, cls, *args):
        kw = self.build_kwargs(*args)
        instance = self.construct(cls, **kw)
        self.prepare(instance, **kw)
        return instance

    def prepare(self, app, **kwargs):
        # Re-apply the configuration changes captured at pickling time.
        app.conf.update(kwargs['changes'])

    def build_kwargs(self, *args):
        return self.build_standard_kwargs(*args)

    def build_standard_kwargs(self, main, changes, loader, backend, amqp,
                              events, log, control, accept_magic_kwargs,
                              config_source=None):
        # Unpickled apps are never installed as the current app.
        return {'main': main, 'loader': loader, 'backend': backend,
                'amqp': amqp, 'changes': changes, 'events': events,
                'log': log, 'control': control, 'set_as_current': False,
                'accept_magic_kwargs': accept_magic_kwargs,
                'config_source': config_source}

    def construct(self, cls, **kwargs):
        return cls(**kwargs)
def _unpickle_app(cls, pickler, *args):
"""Rebuild app for versions 2.5+"""
return pickler()(cls, *args)
def _unpickle_app_v2(cls, kwargs):
"""Rebuild app for versions 3.1+"""
kwargs['set_as_current'] = False
return cls(**kwargs)
def filter_hidden_settings(conf):
    """Return a copy of ``conf`` with secret-looking values masked.

    Nested mappings are filtered recursively; broker URLs keep their
    structure but have the password portion masked.
    """

    def maybe_censor(key, value, mask='*' * 8):
        if isinstance(value, Mapping):
            return filter_hidden_settings(value)
        if isinstance(key, string_t):
            if HIDDEN_SETTINGS.search(key):
                return mask
            if 'BROKER_URL' in key.upper():
                from kombu import Connection
                return Connection(value).as_uri(mask=mask)
        return value

    return dict((key, maybe_censor(key, value))
                for key, value in items(conf))
def bugreport(app):
    """Return a string containing information useful in bug reports."""
    import billiard
    import celery
    import kombu

    try:
        connection = app.connection()
        driver_v = '{0}:{1}'.format(connection.transport.driver_name,
                                    connection.transport.driver_version())
        transport = connection.transport_cls
    except Exception:
        # Broker may be unreachable/unconfigured; report blanks instead.
        transport = driver_v = ''

    return BUGREPORT_INFO.format(
        celery_v=celery.VERSION_BANNER,
        kombu_v=kombu.__version__,
        billiard_v=billiard.__version__,
        py_v=_platform.python_version(),
        py_i=pyimplementation(),
        system=_platform.system(),
        arch=', '.join(x for x in _platform.architecture() if x),
        driver_v=driver_v,
        transport=transport,
        results=app.conf.CELERY_RESULT_BACKEND or 'disabled',
        human_settings=app.conf.humanize(),
        loader=qualname(app.loader.__class__),
    )
def find_app(app, symbol_by_name=symbol_by_name, imp=import_from_cwd):
    """Resolve a Celery app from a name like ``proj``, ``proj.celery``
    or ``proj:app``.

    Tries, in order: direct symbol lookup, a module's ``app`` attribute,
    its ``celery`` attribute, a ``<name>.celery`` submodule, and finally
    scanning the module namespace for a :class:`Celery` instance.
    """
    from .base import Celery
    try:
        sym = symbol_by_name(app, imp=imp)
    except AttributeError:
        # last part was not an attribute, but a module
        sym = imp(app)
    if isinstance(sym, ModuleType) and ':' not in app:
        try:
            found = sym.app
            # An ``app`` attribute that is itself a module doesn't count.
            if isinstance(found, ModuleType):
                raise AttributeError()
        except AttributeError:
            try:
                found = sym.celery
                # Same rule for a ``celery`` submodule attribute.
                if isinstance(found, ModuleType):
                    raise AttributeError()
            except AttributeError:
                if getattr(sym, '__path__', None):
                    # It's a package: retry with the ``.celery`` module.
                    try:
                        return find_app(
                            '{0}.celery'.format(app),
                            symbol_by_name=symbol_by_name, imp=imp,
                        )
                    except ImportError:
                        pass
                # Last resort: any Celery instance in the namespace.
                for suspect in values(vars(sym)):
                    if isinstance(suspect, Celery):
                        return suspect
                raise
            else:
                return found
        else:
            return found
    return sym
|
|
import urlparse
from contextlib import closing
import requests
from pyquery import PyQuery as pq
class WebsiteMetadata(dict):
    """
    A subclass of dict representing inferred metadata of a URL passed in
    construction. Currently includes:
    - Canonical URL
    - Description
    - Icon
    - Keywords
    - Name
    Usage:
        >>> WebsiteMetadata('http://mobile.nytimes.com')
        {
            'keywords': None,
            'icon': 'http://mobile.nytimes.com/.../touch-icon-ipad-144.498cc670.png',
            'canonical_url': 'http://www.nytimes.com/?nytmobile=0',
            'name': 'The New York Times',
            'description': None
        }
    """
    # Only these keys may be read or written on the mapping.
    _valid_keys = ['canonical_url', 'description', 'icon', 'keywords', 'name']
    # Icon sizes in preference order; None matches a size-less link tag.
    _appletouchicon_sizes = [152, 144, 120, 114, 76, 72, None]
    _msapplication_sizes = [310, 150, 70]

    def __init__(self, url):
        self.url = url
        self._document = None
        self._markup = None
        self._get_metadata()

    def __getitem__(self, key):
        """
        Prevents invalid keys from being retrieved.
        """
        if key in self._valid_keys:
            return dict.__getitem__(self, key)
        else:
            raise KeyError(self._invalid_key % key)

    def __setitem__(self, key, val):
        """
        Prevents invalid keys from being set.
        """
        if key in self._valid_keys:
            return dict.__setitem__(self, key, val)
        else:
            raise KeyError(self._invalid_key % key)

    def update(self):
        """
        Retries the request to fetch the URL
        and updates any discoverable metadata.
        """
        self._get_metadata()

    @property
    def _invalid_key(self):
        """
        Returns a comma-separated list of valid keys, useful for exception
        messages.
        """
        return 'Invalid key "%%s". Valid: %s' % ', '.join(self._valid_keys)

    def _get_document(self):
        """
        Makes a request to the passed URL, returns and stores a PyQuery
        document for the response body.
        If the document errors (HTTP codes 400 or higher), an instance of
        requests.exceptions.HTTPError is raised, with an additional property
        `response` containing the response object.
        """
        with closing(requests.get(self.url, stream=True)) as response:
            if response.status_code >= 400:
                exception = requests.exceptions.HTTPError()
                exception.response = response
                raise exception
            self._markup = response.content
            self._document = pq(self._markup)
            return self._document

    def _get_metadata(self):
        """
        Determines and sets the document's metadata on self.
        """
        if not self._document:
            self._get_document()
        self['canonical_url'] = self._get_canonical_url()
        self['description'] = self._get_description()
        self['icon'] = self._get_icon()
        self['keywords'] = self._get_keywords()
        self['name'] = self._get_name()

    def _get_name(self):
        """
        Attempts to return a name for the object's document. If one cannot be
        found, returns None.
        """
        return (self._check_text('title') or
                self._check_opengraph('title') or
                self._check_meta('apple-mobile-web-app-title') or
                self._check_meta('application-name'))

    def _get_description(self):
        """
        Attempts to return a description for the object's document. If one
        cannot be found, returns None.
        """
        return (self._check_meta('description') or
                self._check_opengraph('description') or
                self._check_meta('msapplication-tooltip'))

    def _get_icon(self):
        """
        Attempts to join the reported path to an icon for the object's
        document with the URL itself. If one cannot be found, returns None.
        """
        path = self._get_icon_path()
        if path and not path.startswith('http'):
            return urlparse.urljoin(self.url, path)
        elif path and path.startswith('http'):
            return path
        return None

    def _get_icon_path(self):
        """
        Attempts to return the reported path to an icon for the object's
        document. If one cannot be found, returns None.
        """
        return (self._check_apple_icon() or
                self._check_opengraph('image') or
                self._check_ms_icon() or
                self._check_link('fluid-icon'))

    def _get_canonical_url(self):
        """
        Attempts to return the canonical URL for the object's document. If one
        cannot be found, returns None.
        """
        return (self._check_link('canonical') or
                self._check_opengraph('url'))

    def _get_keywords(self):
        """
        Attempts to return the appropriate keywords for the object's document.
        If they cannot be found, returns None.
        """
        return self._check_meta('keywords')

    def _check_text(self, selector):
        """
        Passed a CSS selector, attempts to return the DOM innerText of the
        first element matching that selector. If there are no matches, returns
        None.
        """
        # Narrowed from a bare ``except:`` - only the lookup failures the
        # selector/indexing can raise are treated as "not found".
        try:
            return self._document(selector)[0].text
        except (AttributeError, IndexError, KeyError):
            return None

    def _check_meta(self, name):
        """
        Attempts to return the `content` attribute of the `<meta />` element
        with the `name` attribute of the passed name. If one cannot be found,
        returns None.
        """
        selector = 'meta[name="%s"]' % name
        try:
            return self._document(selector)[0].attrib['content']
        except (AttributeError, IndexError, KeyError):
            return None

    def _check_link(self, rel):
        """
        Attempts to return the `href` attribute of a `<link />` tag with `rel`
        set to the passed name. If one cannot be found, returns None.
        """
        selector = 'link[rel="%s"]' % rel
        try:
            return self._document(selector)[0].attrib['href']
        except (AttributeError, IndexError, KeyError):
            return None

    def _check_opengraph(self, name):
        """
        Attempts to return the value of an OpenGraph `<meta />` tag of the
        property with the passed name. If one cannot be found, returns None.
        """
        selector = 'meta[property="og:%s"]' % name
        try:
            return self._document(selector)[0].attrib['content']
        except (AttributeError, IndexError, KeyError):
            return None

    def _check_apple_icon(self):
        """
        Attempts to return the URL of the apple-touch-icon of the most
        preferred size (based on the order in cls._appletouchicon_sizes). If
        one is not defined, returns None.
        """
        for size in self._appletouchicon_sizes:
            icon = self._check_apple_icon_size(size)
            if icon:
                return icon
        return None

    def _check_apple_icon_size(self, dimension):
        """
        Attempts to return the URL to an apple-touch-icon icon with the
        passed dimension. If a dimension is not defined, checks for one with an
        undefined size. If no icon matching the criteria can be found, returns
        None.
        """
        selector = 'link[rel="apple-touch-icon-precomposed"]'
        if dimension:
            selector = '%s[sizes="%sx%s"]' % (selector, dimension, dimension)
        try:
            return self._document(selector)[0].attrib['href']
        except (AttributeError, IndexError, KeyError):
            return None

    def _check_ms_icon(self):
        """
        Attempts to return the URL of the msapplication icon of the most
        preferred size (based on the order in cls._msapplication_sizes). If one
        is not defined, returns None.
        """
        for size in self._msapplication_sizes:
            icon = self._check_ms_icon_size(size)
            if icon:
                return icon
        return None

    def _check_ms_icon_size(self, dimension):
        """
        Attempts to return the URL to an msapplication square icon with the
        passed dimension. If one is not defined, returns None.
        """
        selector = 'meta[name="msapplication-square%sx%slogo"]' % (dimension,
                                                                   dimension)
        try:
            return self._document(selector)[0].attrib['content']
        except (AttributeError, IndexError, KeyError):
            return None
|
|
""" Test some fundamental results from wmean_2D_latlon
"""
import numpy as np
from numpy import ma
from numpy.random import random
from maud import tests_support
from maud import wmean_2D_latlon_serial, wmean_2D_latlon
# Optional Cython-accelerated implementation; the tests below fall back
# to the pure-Python functions when cython isn't installed.
try:
    import cython
    with_cython = True
except:
    with_cython = False
if with_cython:
    from cmaud import wmean_2D_latlon as cwmean_2D_latlon
#def random_input(N=10):
#    I, J = (N*random(2)).astype('i')+1
# Window types exercised by the tests in this module.
WINTYPES = ['hamming', 'hann', 'blackman', 'boxcar']
def test_inputsizes():
    """Run the shared input-size checks on every available backend."""
    implementations = [wmean_2D_latlon]
    if with_cython:
        implementations.append(cwmean_2D_latlon)
    for f in implementations:
        tests_support.inputsizes_f2D(f)
def test_mask(N=4):
    """Run the shared masked-input checks on every available backend."""
    implementations = [wmean_2D_latlon]
    if with_cython:
        implementations.append(cwmean_2D_latlon)
    for f in implementations:
        tests_support.masked_input_2D(f, N)
def test_whitenoise():
    """Smoothing zero-mean white noise must strongly damp its amplitude.

    Applied on a 3D array (3 layers of 2D noise). Needs improvement.
    """
    axis = np.arange(-5, 5, 0.25)
    X, Y = np.meshgrid(axis, axis)
    noise = ma.array(random([3] + list(X.shape)) - 0.5)
    smoothed = wmean_2D_latlon(X, Y, noise, l=700e3)
    # Large limits since the filter does not include too many numbers
    assert abs(smoothed).mean() < 0.05
    assert abs(smoothed).max() < 0.1
def test_2Dmasked_array(N=25):
    """A partially masked input must yield a (partially) masked output."""
    l = N/2
    axis = np.linspace(-10, 10, N)
    X, Y = np.meshgrid(axis, axis)
    raw = random((N, N))
    # Mask roughly the top 30% of the values.
    data = ma.masked_greater(raw, np.percentile(raw, 70))
    smoothed = wmean_2D_latlon(X, Y, data, l=l)
    assert smoothed.mask.any()
def test_ones_2D3D(N=9):
    """ The energy must be preserved
        Therefore, an array of ones must return only ones, even if
        the input has mask, and with interp.

        NOTE(review): this test was previously also named ``test_ones``
        and was silently shadowed by a later definition with the same
        name; renamed so it actually runs. The cython calls are now
        guarded by ``with_cython`` — previously they raised NameError
        whenever cython was not installed.
    """
    l = N/2
    print("Testing 2D array")
    grid = np.linspace(-10, 10, N)
    X, Y = np.meshgrid(grid, grid)
    data = np.ones((N, N))
    tests_support.eval_ones_2D(wmean_2D_latlon, X, Y, data, l)
    if with_cython:
        tests_support.eval_ones_2D(cwmean_2D_latlon, X, Y, data, l)
    print("Testing 3D array")
    data = np.ones((3, N, N))
    tests_support.eval_ones_2D(wmean_2D_latlon, X, Y, data, l)
    if with_cython:
        tests_support.eval_ones_2D(cwmean_2D_latlon, X, Y, data, l)
def test_mask_at_interp():
    """Behavior of masked points with interp on|off.

    As long as the filter is wide enough to capture at least one data
    point per point, interp=True will return a value there.
    """
    implementations = [wmean_2D_latlon]
    if with_cython:
        implementations.append(cwmean_2D_latlon)
    for func in implementations:
        tests_support.mask_at_interp(func)
def test_Serial_x_Parallel(N=10):
    """Serial and parallel runs must produce identical output.

    Improve this. Should include more possibilities like:
    different arrays shapes, l, input types(array x MA)
    """
    scale = N / 2
    grid = np.linspace(-10, 10, N)
    X, Y = np.meshgrid(grid, grid)
    field = random(X.shape)
    serial_result = wmean_2D_latlon_serial(X, Y, field, l=scale)
    parallel_result = wmean_2D_latlon(X, Y, field, l=scale)
    assert (serial_result == parallel_result).all()
def test_Python_x_Cython(N=10):
    """Pure-python and cython implementations must agree."""
    if not with_cython:
        # Nothing to compare against without the compiled module.
        return
    tests_support.compare2func(wmean_2D_latlon, cwmean_2D_latlon)
def latlon_2D(I=10, J=10):
    """Create random (Lat, Lon) position arrays of shape (I, J) for tests.

    NOTE(review): Lon is drawn from [-180, 380) (560 * rand - 180); the
    560 looks like a typo for 360 — confirm before changing, since tests
    may rely on out-of-range longitudes.
    """
    lat = 180 * random((I, J)) - 90
    lon = 560 * random((I, J)) - 180
    return lat, lon
def test_allmasked(N=10):
    """If the input is all masked, the output must be all masked."""
    sizes_i = (99 * random(N)).astype('i') + 1
    sizes_j = (99 * random(N)).astype('i') + 1
    for ni, nj in zip(sizes_i, sizes_j):
        fully_masked = ma.masked_all((ni, nj))
        Lat, Lon = latlon_2D(ni, nj)
        smoothed = wmean_2D_latlon(Lat, Lon, fully_masked, l=1e10)
        assert smoothed.mask.all()
def test_ones(N=10):
    """Filtering an array of ones must return just ones."""
    sizes_i = (25 * random(N)).astype('i') + 1
    sizes_j = (25 * random(N)).astype('i') + 1
    for wintype in WINTYPES:
        print("Testing: %s" % wintype)
        for ni, nj in zip(sizes_i, sizes_j):
            field = np.ones((ni, nj), dtype='f')
            Lat, Lon = latlon_2D(ni, nj)
            smoothed = wmean_2D_latlon(Lat, Lon, field, method=wintype,
                                       l=1e10)
            assert (smoothed == field).all()
def whitenoise(Lat, Lon, l):
    """Return the residual left after boxcar-smoothing (1 + white noise)."""
    noise = np.random.random(Lon.shape) - 0.5
    smoothed = wmean_2D_latlon(Lat, Lon, 1 + noise, method='boxcar', l=l)
    return smoothed - 1
def hardcoded_maskedarray():
    """Test that masked data is not considered in the average.

    Fixes:
    - The cython variant was called through ``cwindow_mean_2D_latlon``,
      a name never imported in this module (NameError); the imported
      name is ``cwmean_2D_latlon``.
    - The python/cython comparison is now guarded by ``with_cython``,
      consistent with the rest of the module.
    """
    h = np.array([[1e9, 1e9, 1e9],
                  [1e9, 3.14, 1e9],
                  [1e9, 1e9, 1e9]])
    h = ma.masked_greater(h, 10)
    lon = np.array([10.1, 10, 9.9])
    lat = np.array([-0.1, -0.09, -0.08])
    Lon, Lat = np.meshgrid(lon, lat)
    h_smooth = wmean_2D_latlon(Lat, Lon, h, l=1e10)
    if with_cython:
        # maud and cmaud should return the very same result
        h_smooth2 = cwmean_2D_latlon(Lat, Lon, h, l=1e10)
        assert (h_smooth == h_smooth2).all()
    # The mask must survive the smoothing and the valid value must be
    # untouched (single unmasked point, infinite window).
    assert (h_smooth.mask == h.mask).all()
    assert (np.absolute(h_smooth - h).sum() == 0.)
def random_maskedarray(N=10, res=0.1):
    """Compare python and cython filters on a random masked array.

    Fixes:
    - ``cwindow_mean_2D_latlon`` was an undefined name (NameError); the
      module imports the cython variant as ``cwmean_2D_latlon``.
    - The cython comparisons are guarded by ``with_cython``.
    """
    grid = np.arange(-N/2, N/2) * res
    Lon, Lat = np.meshgrid(grid, grid)
    h = random(Lon.shape)
    h = ma.masked_greater(h, 0.7)
    # Small and effectively-infinite window; python and cython must match.
    for l in (.1, 1e10):
        h_smooth = wmean_2D_latlon(Lat, Lon, h, l=l)
        if with_cython:
            # maud and cmaud should return the very same result
            h_csmooth = cwmean_2D_latlon(Lat, Lon, h, l=l)
            assert (h_smooth == h_csmooth).all()
    # After the infinite window pass, mask and values are preserved.
    assert (h_smooth.mask == h.mask).all()
    assert (np.absolute(h_smooth - h).sum() == 0.)
def interp():
    """Test the interp option.

    Fix: the cython variant was referenced by the undefined name
    ``cwindow_mean_2D_latlon`` (NameError); the imported name is
    ``cwmean_2D_latlon``, and its use is now guarded by ``with_cython``.
    """
    lon = np.arange(-1, 10.01, 0.1)
    lat = np.arange(-5, 1.01, 0.1)
    Lon, Lat = np.meshgrid(lon, lat)
    h = ma.masked_greater(np.random.random(Lon.shape), 0.7)
    h_smooth = wmean_2D_latlon(Lat, Lon, h, l=2e5, interp=False)
    h_smooth_i = wmean_2D_latlon(Lat, Lon, h, l=2e5, interp=True)
    if with_cython:
        h_csmooth = cwmean_2D_latlon(Lat, Lon, h, l=2e5, interp=False)
        h_csmooth_i = cwmean_2D_latlon(Lat, Lon, h, l=2e5, interp=True)
        assert (h_smooth == h_csmooth).all()
        assert (h_smooth_i == h_csmooth_i).all()
    # Where both outputs are defined they must agree; interp may only
    # add values at previously masked points.
    assert ((h_smooth - h_smooth_i) == 0).all()
    assert (h_smooth_i.compressed().size >= h_smooth.compressed().size)
def answer():
    """Smoothing white noise with a very wide (l=1e6) window must
    essentially remove it."""
    lon = np.arange(-1, 10.01, 0.1)
    lat = np.arange(-5, 1.01, 0.1)
    Lon, Lat = np.meshgrid(lon, lat)
    residual = whitenoise(Lat, Lon, 1e6)
    assert np.absolute(residual).mean() < 0.01
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Wraps multiple ways to communicate over SSH.
"""
from typing import Type
from typing import Optional
from typing import Tuple
from typing import List
from typing import Union
from typing import cast
have_paramiko = False
try:
import paramiko
have_paramiko = True
except ImportError:
pass
# Depending on your version of Paramiko, it may cause a deprecation
# warning on Python 2.6.
# Ref: https://bugs.launchpad.net/paramiko/+bug/392973
import os
import re
import time
import subprocess
import logging
import warnings
from os.path import split as psplit
from os.path import join as pjoin
from libcloud.utils.logging import ExtraLogFormatter
from libcloud.utils.py3 import StringIO
from libcloud.utils.py3 import b
__all__ = [
'BaseSSHClient',
'ParamikoSSHClient',
'ShellOutSSHClient',
'SSHCommandTimeoutError'
]
SUPPORTED_KEY_TYPES_URL = 'https://libcloud.readthedocs.io/en/latest/compute/deployment.html#supported-private-ssh-key-types' # NOQA
class SSHCommandTimeoutError(Exception):
    """
    Raised when a command executed over SSH exceeds its allotted run time.

    Carries the command, the timeout that was exceeded, and whatever
    stdout/stderr had been collected before the command was aborted.
    """
    def __init__(self, cmd, timeout, stdout=None, stderr=None):
        # type: (str, float, Optional[str], Optional[str]) -> None
        message = 'Command didn\'t finish in %s seconds' % (timeout)
        super(SSHCommandTimeoutError, self).__init__(message)
        self.cmd = cmd
        self.timeout = timeout
        self.stdout = stdout
        self.stderr = stderr
        self.message = message

    def __repr__(self):
        return ('<SSHCommandTimeoutError: cmd="%s",timeout=%s)>'
                % (self.cmd, self.timeout))

    def __str__(self):
        # Keep str() identical to repr() for log friendliness.
        return self.__repr__()
class BaseSSHClient(object):
    """
    Abstract base for an SSH/SCP connection to a remote node.

    This class only records the connection parameters and defines the
    method contract; concrete subclasses supply the actual transport.
    """
    def __init__(self,
                 hostname,  # type: str
                 port=22,  # type: int
                 username='root',  # type: str
                 password=None,  # type: Optional[str]
                 key=None,  # type: Optional[str]
                 key_files=None,  # type: Optional[Union[str, List[str]]]
                 timeout=None  # type: Optional[float]
                 ):
        """
        :type hostname: ``str``
        :keyword hostname: Hostname or IP address to connect to.
        :type port: ``int``
        :keyword port: TCP port to communicate on, defaults to 22.
        :type username: ``str``
        :keyword username: Username to use, defaults to root.
        :type password: ``str``
        :keyword password: Password to authenticate with, or the passphrase
                           unlocking a password protected private key.
        :param key: Deprecated in favor of ``key_files`` argument.
        :type key_files: ``str`` or ``list``
        :keyword key_files: A list of paths to the private key files to use.
        """
        if key is not None:
            message = ('You are using deprecated "key" argument which has '
                       'been replaced with "key_files" argument')
            warnings.warn(message, DeprecationWarning)
            # An explicitly provided key_files always wins over legacy key.
            key_files = key if not key_files else key_files
        self.hostname = hostname
        self.port = port
        self.username = username
        self.password = password
        self.key_files = key_files
        self.timeout = timeout

    def connect(self):
        # type: () -> bool
        """
        Connect to the remote node over SSH.

        :return: True if the connection has been successfully established.
        :rtype: ``bool``
        """
        raise NotImplementedError(
            'connect not implemented for this ssh client')

    def put(self, path, contents=None, chmod=None, mode='w'):
        # type: (str, Optional[Union[str, bytes]], Optional[int], str) -> str
        """
        Upload a file to the remote node.

        :keyword path: File path on the remote node (``str``).
        :keyword contents: File contents (``str``).
        :keyword chmod: chmod file to this after creation (``int``).
        :keyword mode: Mode in which the file is opened (``str``).

        :return: Full path to the location where the file has been saved.
        :rtype: ``str``
        """
        raise NotImplementedError(
            'put not implemented for this ssh client')

    def putfo(self, path, fo=None, chmod=None):
        """
        Upload a file like object to the remote server.

        :param path: Path to upload the file to (``str``).
        :param fo: File like object to read the content from.
        :keyword chmod: chmod file to this after creation (``int``).

        :return: Full path to the location where the file has been saved.
        :rtype: ``str``
        """
        raise NotImplementedError(
            'putfo not implemented for this ssh client')

    def delete(self, path):
        # type: (str) -> bool
        """
        Delete/Unlink a file on the remote node.

        :keyword path: File path on the remote node (``str``).

        :return: True if the file has been successfully deleted.
        :rtype: ``bool``
        """
        raise NotImplementedError(
            'delete not implemented for this ssh client')

    def run(self, cmd, timeout=None):
        # type: (str, Optional[float]) -> Tuple[str, str, int]
        """
        Run a command on a remote node.

        :keyword cmd: Command to run (``str``).

        :return: ``list`` of [stdout, stderr, exit_status]
        """
        raise NotImplementedError(
            'run not implemented for this ssh client')

    def close(self):
        # type: () -> bool
        """
        Shutdown the connection to the remote node.

        :return: True if the connection has been successfully closed.
        :rtype: ``bool``
        """
        raise NotImplementedError(
            'close not implemented for this ssh client')

    def _get_and_setup_logger(self):
        # type: () -> logging.Logger
        # Shared logger; file-based debug output is enabled only when the
        # LIBCLOUD_DEBUG environment variable points at a log path.
        logger = logging.getLogger('libcloud.compute.ssh')
        debug_path = os.getenv('LIBCLOUD_DEBUG')
        if debug_path:
            file_handler = logging.FileHandler(debug_path)
            file_handler.setFormatter(ExtraLogFormatter())
            logger.addHandler(file_handler)
            logger.setLevel(logging.DEBUG)
        return logger
class ParamikoSSHClient(BaseSSHClient):
    """
    A SSH Client powered by Paramiko.
    """
    # Maximum number of bytes to read at once from a socket
    CHUNK_SIZE = 4096
    # How long to sleep while waiting for command to finish (to prevent busy
    # waiting)
    SLEEP_DELAY = 0.2
    def __init__(self,
                 hostname,  # type: str
                 port=22,  # type: int
                 username='root',  # type: str
                 password=None,  # type: Optional[str]
                 key=None,  # type: Optional[str]
                 key_files=None,  # type: Optional[Union[str, List[str]]]
                 key_material=None,  # type: Optional[str]
                 timeout=None,  # type: Optional[float]
                 keep_alive=None,  # type: Optional[int]
                 use_compression=False  # type: bool
                 ):
        """
        Authentication is always attempted in the following order:
        - The key passed in (if key is provided)
        - Any key we can find through an SSH agent (only if no password and
          key is provided)
        - Any "id_rsa" or "id_dsa" key discoverable in ~/.ssh/ (only if no
          password and key is provided)
        - Plain username/password auth, if a password was given (if password is
          provided)
        :param keep_alive: Optional keep alive internal (in seconds) to use.
        :type keep_alive: ``int``
        :param use_compression: True to use compression.
        :type use_compression: ``bool``
        """
        if key_files and key_material:
            raise ValueError(('key_files and key_material arguments are '
                              'mutually exclusive'))
        super(ParamikoSSHClient, self).__init__(hostname=hostname, port=port,
                                                username=username,
                                                password=password,
                                                key=key,
                                                key_files=key_files,
                                                timeout=timeout)
        self.key_material = key_material
        self.keep_alive = keep_alive
        self.use_compression = use_compression
        self.client = paramiko.SSHClient()
        self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.logger = self._get_and_setup_logger()
        # This object is lazily created on first SFTP operation (e.g. put()
        # method call)
        self.sftp_client = None
    def connect(self):
        """
        Open the SSH connection, deriving the auth parameters (password,
        key files, in-memory key material, or agent/default keys) from the
        values given to the constructor. Returns True on success.
        """
        conninfo = {'hostname': self.hostname,
                    'port': self.port,
                    'username': self.username,
                    'allow_agent': False,
                    'look_for_keys': False}
        if self.password:
            conninfo['password'] = self.password
        if self.key_files:
            conninfo['key_filename'] = self.key_files
        if self.key_material:
            conninfo['pkey'] = self._get_pkey_object(
                key=self.key_material,
                password=self.password)
        if not self.password and not (self.key_files or self.key_material):
            # No explicit credentials at all: let paramiko try the agent
            # and the default ~/.ssh identities.
            conninfo['allow_agent'] = True
            conninfo['look_for_keys'] = True
        if self.timeout:
            conninfo['timeout'] = self.timeout
        # This is a workaround for paramiko only supporting key files in
        # format staring with "BEGIN RSA PRIVATE KEY".
        # If key_files are provided and a key looks like a PEM formatted key
        # we try to convert it into a format supported by paramiko
        if (self.key_files and not isinstance(self.key_files, (list, tuple))
                and os.path.isfile(self.key_files)):
            with open(self.key_files, 'r') as fp:
                key_material = fp.read()
            try:
                pkey = self._get_pkey_object(key=key_material,
                                             password=self.password)
            except paramiko.ssh_exception.PasswordRequiredException as e:
                raise e
            except Exception:
                pass
            else:
                # It appears key is valid, but it was passed in in an invalid
                # format. Try to use the converted key directly
                del conninfo['key_filename']
                conninfo['pkey'] = pkey
        extra = {'_hostname': self.hostname, '_port': self.port,
                 '_username': self.username, '_timeout': self.timeout}
        if self.password:
            extra['_auth_method'] = 'password'
        else:
            extra['_auth_method'] = 'key_file'
            if self.key_files:
                extra['_key_file'] = self.key_files
        self.logger.debug('Connecting to server', extra=extra)
        self.client.connect(**conninfo)
        return True
    def put(self, path, contents=None, chmod=None, mode='w'):
        """
        Upload ``contents`` to ``path`` over SFTP, creating any missing
        intermediate directories, and return the sanitized absolute path
        the file was written to.
        """
        extra = {'_path': path, '_mode': mode, '_chmod': chmod}
        self.logger.debug('Uploading file', extra=extra)
        sftp = self._get_sftp_client()
        # less than ideal, but we need to mkdir stuff otherwise file() fails
        head, tail = psplit(path)
        if path[0] == "/":
            sftp.chdir("/")
        else:
            # Relative path - start from a home directory (~)
            sftp.chdir('.')
        for part in head.split("/"):
            if part != "":
                try:
                    sftp.mkdir(part)
                except IOError:
                    # so, there doesn't seem to be a way to
                    # catch EEXIST consistently *sigh*
                    pass
                sftp.chdir(part)
        cwd = sftp.getcwd()
        cwd = self._sanitize_cwd(cwd=cwd)
        ak = sftp.file(tail, mode=mode)
        ak.write(contents)
        if chmod is not None:
            ak.chmod(chmod)
        ak.close()
        file_path = self._sanitize_file_path(cwd=cwd, file_path=path)
        return file_path
    def putfo(self, path, fo=None, chmod=None):
        """
        Upload file like object to the remote server.
        Unlike put(), this method operates on file objects and not directly on
        file content which makes it much more efficient for large files since
        it utilizes pipelining.
        """
        extra = {'_path': path, '_chmod': chmod}
        self.logger.debug('Uploading file', extra=extra)
        sftp = self._get_sftp_client()
        # less than ideal, but we need to mkdir stuff otherwise file() fails
        head, tail = psplit(path)
        if path[0] == "/":
            sftp.chdir("/")
        else:
            # Relative path - start from a home directory (~)
            sftp.chdir('.')
        for part in head.split("/"):
            if part != "":
                try:
                    sftp.mkdir(part)
                except IOError:
                    # so, there doesn't seem to be a way to
                    # catch EEXIST consistently *sigh*
                    pass
                sftp.chdir(part)
        cwd = sftp.getcwd()
        cwd = self._sanitize_cwd(cwd=cwd)
        sftp.putfo(fo, path)
        if chmod is not None:
            ak = sftp.file(tail)
            ak.chmod(chmod)
            ak.close()
        file_path = self._sanitize_file_path(cwd=cwd, file_path=path)
        return file_path
    def delete(self, path):
        """
        Unlink ``path`` on the remote node over a fresh SFTP session.
        Returns True.
        """
        extra = {'_path': path}
        self.logger.debug('Deleting file', extra=extra)
        sftp = self.client.open_sftp()
        sftp.unlink(path)
        sftp.close()
        return True
    def run(self, cmd, timeout=None):
        # type: (str, Optional[float]) -> Tuple[str, str, int]
        """
        Note: This function is based on paramiko's exec_command()
        method.
        :param timeout: How long to wait (in seconds) for the command to
                        finish (optional). Raises SSHCommandTimeoutError
                        when exceeded.
        :type timeout: ``float``
        """
        extra1 = {'_cmd': cmd}
        self.logger.debug('Executing command', extra=extra1)
        # Use the system default buffer size
        bufsize = -1
        transport = self._get_transport()
        chan = transport.open_session()
        start_time = time.time()
        chan.exec_command(cmd)
        stdout = StringIO()
        stderr = StringIO()
        # Create a stdin file and immediately close it to prevent any
        # interactive script from hanging the process.
        stdin = chan.makefile('wb', bufsize)
        stdin.close()
        # Receive all the output
        # Note #1: This is used instead of chan.makefile approach to prevent
        # buffering issues and hanging if the executed command produces a lot
        # of output.
        #
        # Note #2: If you are going to remove "ready" checks inside the loop
        # you are going to have a bad time. Trying to consume from a channel
        # which is not ready will block for indefinitely.
        exit_status_ready = chan.exit_status_ready()
        if exit_status_ready:
            # It's possible that some data is already available when exit
            # status is ready
            stdout.write(self._consume_stdout(chan).getvalue())
            stderr.write(self._consume_stderr(chan).getvalue())
        while not exit_status_ready:
            current_time = time.time()
            elapsed_time = (current_time - start_time)
            if timeout and (elapsed_time > timeout):
                # TODO: Is this the right way to clean up?
                chan.close()
                stdout_str = stdout.getvalue()  # type: str
                stderr_str = stderr.getvalue()  # type: str
                raise SSHCommandTimeoutError(cmd=cmd, timeout=timeout,
                                             stdout=stdout_str,
                                             stderr=stderr_str)
            stdout.write(self._consume_stdout(chan).getvalue())
            stderr.write(self._consume_stderr(chan).getvalue())
            # We need to check the exist status here, because the command could
            # print some output and exit during this sleep below.
            exit_status_ready = chan.exit_status_ready()
            if exit_status_ready:
                break
            # Short sleep to prevent busy waiting
            time.sleep(self.SLEEP_DELAY)
        # Receive the exit status code of the command we ran.
        status = chan.recv_exit_status()  # type: int
        stdout_str = stdout.getvalue()
        stderr_str = stderr.getvalue()
        extra2 = {'_status': status, '_stdout': stdout_str,
                  '_stderr': stderr_str}
        self.logger.debug('Command finished', extra=extra2)
        result = (stdout_str, stderr_str, status)  # type: Tuple[str, str, int]
        return result
    def close(self):
        """
        Close the SSH connection and the lazily created SFTP session, if
        any. Returns True.
        """
        self.logger.debug('Closing server connection')
        if self.client:
            self.client.close()
        if self.sftp_client:
            self.sftp_client.close()
        return True
    def _consume_stdout(self, chan):
        """
        Try to consume stdout data from chan if it's receive ready.
        """
        stdout = self._consume_data_from_channel(
            chan=chan,
            recv_method=chan.recv,
            recv_ready_method=chan.recv_ready)
        return stdout
    def _consume_stderr(self, chan):
        """
        Try to consume stderr data from chan if it's receive ready.
        """
        stderr = self._consume_data_from_channel(
            chan=chan,
            recv_method=chan.recv_stderr,
            recv_ready_method=chan.recv_stderr_ready)
        return stderr
    def _consume_data_from_channel(self, chan, recv_method, recv_ready_method):
        """
        Try to consume data from the provided channel.
        Keep in mind that data is only consumed if the channel is receive
        ready.
        """
        result = StringIO()
        result_bytes = bytearray()
        if recv_ready_method():
            data = recv_method(self.CHUNK_SIZE)
            result_bytes += b(data)
            while data:
                ready = recv_ready_method()
                if not ready:
                    break
                data = recv_method(self.CHUNK_SIZE)
                result_bytes += b(data)
        # We only decode data at the end because a single chunk could contain
        # a part of multi byte UTF-8 character (whole multi bytes character
        # could be split over two chunks)
        result.write(result_bytes.decode('utf-8', errors='ignore'))
        return result
    def _get_pkey_object(self, key, password=None):
        """
        Try to detect private key type and return paramiko.PKey object.
        # NOTE: Paramiko only supports key in PKCS#1 PEM format.
        """
        key_types = [
            (paramiko.RSAKey, 'RSA'),
            (paramiko.DSSKey, 'DSA'),
            (paramiko.ECDSAKey, 'EC')
        ]
        paramiko_version = getattr(paramiko, '__version__', '0.0.0')
        paramiko_version = tuple([int(c) for c in paramiko_version.split('.')])
        if paramiko_version >= (2, 2, 0):
            # Ed25519 is only supported in paramiko >= 2.2.0
            key_types.append((paramiko.ed25519key.Ed25519Key, 'Ed25519'))
        for cls, key_type in key_types:
            # Work around for paramiko not recognizing keys which start with
            # "----BEGIN PRIVATE KEY-----"
            # Since key is already in PEM format, we just try changing the
            # header and footer
            key_split = key.strip().splitlines()
            if (key_split[0] == '-----BEGIN PRIVATE KEY-----' and
                    key_split[-1] == '-----END PRIVATE KEY-----'):
                key_split[0] = '-----BEGIN %s PRIVATE KEY-----' % (key_type)
                key_split[-1] = '-----END %s PRIVATE KEY-----' % (key_type)
                key_value = '\n'.join(key_split)
            else:
                # Already a valid key, us it as is
                key_value = key
            try:
                key = cls.from_private_key(StringIO(key_value), password)
            except paramiko.ssh_exception.PasswordRequiredException as e:
                raise e
            except (paramiko.ssh_exception.SSHException, AssertionError) as e:
                if 'private key file checkints do not match' in str(e).lower():
                    msg = ('Invalid password provided for encrypted key. '
                           'Original error: %s' % (str(e)))
                    # Indicates invalid password for password protected keys
                    raise paramiko.ssh_exception.SSHException(msg)
                # Invalid key, try other key type
                pass
            else:
                return key
        msg = ('Invalid or unsupported key type (only RSA, DSS, ECDSA and'
               ' Ed25519 keys'
               ' in PEM format are supported). For more information on '
               ' supported key file types, see %s' % (SUPPORTED_KEY_TYPES_URL))
        raise paramiko.ssh_exception.SSHException(msg)
    def _sanitize_cwd(self, cwd):
        # type: (str) -> str
        # getcwd() returns an invalid path when executing commands on Windows
        # so we need a special case for that scenario
        # For example, we convert /C:/Users/Foo -> C:/Users/Foo
        if re.match(r"^\/\w\:.*$", str(cwd)):
            cwd = str(cwd[1:])
        return cwd
    def _sanitize_file_path(self, cwd, file_path):
        # type: (str, str) -> str
        """
        Sanitize the provided file path and ensure we always return an
        absolute path, even if relative path is passed to to this function.
        """
        if file_path[0] in ['/', '\\'] or re.match(r"^\w\:.*$", file_path):
            # If it's an absolute path we return path as is
            # NOTE: We assume it's a Windows absolute path if it's starts with
            # a drive letter - e.g. C:\\..., D:\\, etc. or with \
            pass
        else:
            if re.match(r"^\w\:.*$", cwd):
                # Windows path
                file_path = cwd + '\\' + file_path
            else:
                file_path = pjoin(cwd, file_path)
        return file_path
    def _get_transport(self):
        """
        Return transport object taking into account keep alive and compression
        options passed to the constructor.
        """
        transport = self.client.get_transport()
        if self.keep_alive:
            transport.set_keepalive(self.keep_alive)
        if self.use_compression:
            transport.use_compression(compress=True)
        return transport
    def _get_sftp_client(self):
        """
        Create SFTP client from the underlying SSH client.
        This method tries to re-use the existing self.sftp_client (if it
        exists) and it also tries to verify the connection is opened and if
        it's not, it will try to re-establish it.
        """
        if not self.sftp_client:
            self.sftp_client = self.client.open_sftp()
        sftp_client = self.sftp_client
        # Verify the connection is still open, if it's not, try to
        # re-establish it.
        # We do that, by calling listdir(). If it returns "Socket is closed"
        # error we assume the connection is closed and we try to re-establish
        # it.
        try:
            sftp_client.listdir(".")
        except OSError as e:
            if "socket is closed" in str(e).lower():
                self.sftp_client = self.client.open_sftp()
            elif "no such file" in str(e).lower():
                # Not a fatal exception, means connection is still open
                pass
            else:
                raise e
        return self.sftp_client
class ShellOutSSHClient(BaseSSHClient):
    """
    This client shells out to "ssh" binary to run commands on the remote
    server.
    Note: This client should not be used in production.

    Fix: ``_run_remote_shell_command`` previously returned stdout in the
    stderr slot (``cast(str, stdout)`` twice), so callers could never see
    the real error output.
    """
    def __init__(self,
                 hostname,  # type: str
                 port=22,  # type: int
                 username='root',  # type: str
                 password=None,  # type: Optional[str]
                 key=None,  # type: Optional[str]
                 key_files=None,  # type: Optional[str]
                 timeout=None  # type: Optional[float]
                 ):
        super(ShellOutSSHClient, self).__init__(hostname=hostname,
                                                port=port, username=username,
                                                password=password,
                                                key=key,
                                                key_files=key_files,
                                                timeout=timeout)
        if self.password:
            raise ValueError('ShellOutSSHClient only supports key auth')
        # Probe for the local ssh binary; exit code 127 is the shell's
        # "command not found".
        child = subprocess.Popen(['ssh'], stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        child.communicate()
        if child.returncode == 127:
            raise ValueError('ssh client is not available')
        self.logger = self._get_and_setup_logger()

    def connect(self):
        """
        This client doesn't support persistent connections establish a new
        connection every time "run" method is called.
        """
        return True

    def run(self, cmd, timeout=None):
        """
        Run ``cmd`` on the remote host.

        NOTE(review): ``timeout`` is accepted for interface compatibility
        but is not enforced by this client.
        """
        return self._run_remote_shell_command([cmd])

    def put(self, path, contents=None, chmod=None, mode='w'):
        """
        Write ``contents`` to ``path`` on the remote host via a shell
        redirect ('>' to overwrite, '>>' to append).
        """
        if mode == 'w':
            redirect = '>'
        elif mode == 'a':
            redirect = '>>'
        else:
            raise ValueError('Invalid mode: ' + mode)
        cmd = ['echo "%s" %s %s' % (contents, redirect, path)]
        self._run_remote_shell_command(cmd)
        return path

    def putfo(self, path, fo=None, chmod=None):
        """
        Upload the content of a file like object via put().
        """
        content = fo.read()
        return self.put(path=path, contents=content, chmod=chmod)

    def delete(self, path):
        """
        Remove ``path`` on the remote host (rm -rf). Returns True.
        """
        cmd = ['rm', '-rf', path]
        self._run_remote_shell_command(cmd)
        return True

    def close(self):
        """
        No persistent connection is kept, so there is nothing to close.
        """
        return True

    def _get_base_ssh_command(self):
        # type: () -> List[str]
        """
        Build the ssh invocation prefix shared by all remote commands:
        identity file, connect timeout and user@host.
        """
        cmd = ['ssh']
        if self.key_files:
            self.key_files = cast(str, self.key_files)
            cmd += ['-i', self.key_files]
        if self.timeout:
            cmd += ['-oConnectTimeout=%s' % (self.timeout)]
        cmd += ['%s@%s' % (self.username, self.hostname)]
        return cmd

    def _run_remote_shell_command(self, cmd):
        # type: (List[str]) -> Tuple[str, str, int]
        """
        Run a command on a remote server.
        :param cmd: Command to run.
        :type cmd: ``list`` of ``str``
        :return: Command stdout, stderr and status code.
        :rtype: ``tuple``
        """
        base_cmd = self._get_base_ssh_command()
        full_cmd = base_cmd + [' '.join(cmd)]
        self.logger.debug('Executing command: "%s"' % (' '.join(full_cmd)))
        child = subprocess.Popen(full_cmd, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        stdout, stderr = child.communicate()
        stdout_str = cast(str, stdout)
        # Bug fix: stderr was previously populated from stdout.
        stderr_str = cast(str, stderr)
        # NOTE(review): communicate() returns bytes here; the casts only
        # satisfy the type checker -- confirm whether callers expect
        # decoded text before changing.
        return (stdout_str, stderr_str, child.returncode)
class MockSSHClient(BaseSSHClient):
    # Stand-in used when paramiko is not installed; every operation raises
    # NotImplementedError through the BaseSSHClient stubs.
    pass
# Default client: prefer the paramiko-backed implementation, and fall back
# to the non-functional mock stub when paramiko failed to import (see the
# try/except at the top of the module).
SSHClient = ParamikoSSHClient  # type: Type[BaseSSHClient]
if not have_paramiko:
    SSHClient = MockSSHClient  # type: ignore
|
|
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
from __future__ import division
import yaml
import numpy as np
import numpy.random as npr
import pdb
from ..utils.cython_bbox import bbox_overlaps, bbox_intersections
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on sth outside of this project
from ..fast_rcnn.config import cfg
from ..fast_rcnn.bbox_transform import bbox_transform
# <<<< obsolete
DEBUG = False
def proposal_target_layer(rpn_rois, gt_boxes, gt_ishard, dontcare_areas, _num_classes):
    """
    Assign object detection proposals to ground-truth targets. Produces proposal
    classification labels and bounding-box regression targets.
    Parameters
    ----------
    rpn_rois: (1 x H x W x A, 5) [0, x1, y1, x2, y2]
    gt_boxes: (G, 5) [x1 ,y1 ,x2, y2, class] int
    gt_ishard: (G, 1) {0 | 1} 1 indicates hard
    dontcare_areas: (D, 4) [ x1, y1, x2, y2]
    _num_classes
    ----------
    Returns
    ----------
    rois: (1 x H x W x A, 5) [0, x1, y1, x2, y2]
    labels: (1 x H x W x A, 1) {0,1,...,_num_classes-1}
    bbox_targets: (1 x H x W x A, K x4) [dx1, dy1, dx2, dy2]
    bbox_inside_weights: (1 x H x W x A, Kx4) 0, 1 masks for the computing loss
    bbox_outside_weights: (1 x H x W x A, Kx4) 0, 1 masks for the computing loss
    """
    # Proposal ROIs (0, x1, y1, x2, y2) coming from RPN
    # (i.e., rpn.proposal_layer.ProposalLayer), or any other source
    all_rois = rpn_rois
    # TODO(rbg): it's annoying that sometimes I have extra info before
    # and other times after box coordinates -- normalize to one format
    # Include ground-truth boxes in the set of candidate rois
    if cfg.TRAIN.PRECLUDE_HARD_SAMPLES and gt_ishard is not None and gt_ishard.shape[0] > 0:
        assert gt_ishard.shape[0] == gt_boxes.shape[0]
        gt_ishard = gt_ishard.astype(int)
        # Only "easy" ground-truth boxes are recycled as extra proposals.
        gt_easyboxes = gt_boxes[gt_ishard != 1, :]
    else:
        gt_easyboxes = gt_boxes
    """
    add the ground-truth to rois will cause zero loss! not good for visuallization
    """
    # Append each easy GT box twice (as-is and jittered), each prefixed with
    # a 0 batch index so rows match the (0, x1, y1, x2, y2) roi layout.
    jittered_gt_boxes = _jitter_gt_boxes(gt_easyboxes)
    zeros = np.zeros((gt_easyboxes.shape[0] * 2, 1), dtype=gt_easyboxes.dtype)
    all_rois = np.vstack((all_rois, \
        np.hstack((zeros, np.vstack((gt_easyboxes[:, :-1], jittered_gt_boxes[:, :-1]))))))
    # Sanity check: single batch only
    assert np.all(all_rois[:, 0] == 0), \
        'Only single item batches are supported'
    num_images = 1
    rois_per_image = cfg.TRAIN.BATCH_SIZE // num_images
    fg_rois_per_image = int(np.round(cfg.TRAIN.FG_FRACTION * rois_per_image))
    # Sample rois with classification labels and bounding box regression
    # targets
    labels, rois, bbox_targets, bbox_inside_weights = _sample_rois(
        all_rois, gt_boxes, gt_ishard, dontcare_areas, fg_rois_per_image,
        rois_per_image, _num_classes)
    # _count = 1
    # if DEBUG:
    #     if _count == 1:
    #         _fg_num, _bg_num = 0, 0
    #     print 'num fg: {}'.format((labels > 0).sum())
    #     print 'num bg: {}'.format((labels == 0).sum())
    #     _count += 1
    #     _fg_num += (labels > 0).sum()
    #     _bg_num += (labels == 0).sum()
    #     print 'num fg avg: {}'.format(_fg_num / _count)
    #     print 'num bg avg: {}'.format(_bg_num / _count)
    #     print 'ratio: {:.3f}'.format(float(_fg_num) / float(_bg_num))
    rois = rois.reshape(-1, 5)
    labels = labels.reshape(-1, 1)
    bbox_targets = bbox_targets.reshape(-1, _num_classes*4)
    bbox_inside_weights = bbox_inside_weights.reshape(-1, _num_classes*4)
    # Outside weights mirror the inside mask (uniform loss weighting).
    bbox_outside_weights = np.array(bbox_inside_weights > 0).astype(np.float32)
    return rois, labels, bbox_targets, bbox_inside_weights, bbox_outside_weights
def _sample_rois(all_rois, gt_boxes, gt_ishard, dontcare_areas, fg_rois_per_image, rois_per_image, num_classes):
    """Generate a random sample of RoIs comprising foreground and background
    examples.

    Fix: ``np.float`` (a deprecated alias of the builtin ``float``, removed
    in NumPy >= 1.24) is replaced with ``np.float64`` — the dtype the alias
    always resolved to — so the function keeps working on current NumPy.

    Returns (labels, rois, bbox_targets, bbox_inside_weights) for the
    sampled subset of ``all_rois``.
    """
    # overlaps: R x G
    overlaps = bbox_overlaps(
        np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float64),
        np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float64))
    gt_assignment = overlaps.argmax(axis=1)  # R
    max_overlaps = overlaps.max(axis=1)  # R
    labels = gt_boxes[gt_assignment, 4]
    # preclude hard samples: rois overlapping a hard GT box enough to be
    # foreground are excluded from sampling entirely
    ignore_inds = np.empty(shape=(0,), dtype=int)
    if cfg.TRAIN.PRECLUDE_HARD_SAMPLES and gt_ishard is not None and gt_ishard.shape[0] > 0:
        gt_ishard = gt_ishard.astype(int)
        gt_hardboxes = gt_boxes[gt_ishard == 1, :]
        if gt_hardboxes.shape[0] > 0:
            # R x H
            hard_overlaps = bbox_overlaps(
                np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float64),
                np.ascontiguousarray(gt_hardboxes[:, :4], dtype=np.float64))
            hard_max_overlaps = hard_overlaps.max(axis=1)  # R
            ignore_inds = np.append(ignore_inds,
                                    np.where(hard_max_overlaps >= cfg.TRAIN.FG_THRESH)[0])
    # preclude dontcare areas
    if dontcare_areas is not None and dontcare_areas.shape[0] > 0:
        # intersec shape is D x R
        intersecs = bbox_intersections(
            np.ascontiguousarray(dontcare_areas, dtype=np.float64),  # D x 4
            np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float64)  # R x 4
        )
        intersecs_sum = intersecs.sum(axis=0)  # R
        ignore_inds = np.append(ignore_inds,
                                np.where(intersecs_sum > cfg.TRAIN.DONTCARE_AREA_INTERSECTION_HI)[0])
    # Select foreground RoIs as those with >= FG_THRESH overlap
    fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]
    fg_inds = np.setdiff1d(fg_inds, ignore_inds)
    # Guard against the case when an image has fewer than fg_rois_per_image
    # foreground RoIs
    fg_rois_per_this_image = min(fg_rois_per_image, fg_inds.size)
    # Sample foreground regions without replacement
    if fg_inds.size > 0:
        fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image, replace=False)
    # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
    bg_inds = np.where((max_overlaps < cfg.TRAIN.BG_THRESH_HI) &
                       (max_overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
    bg_inds = np.setdiff1d(bg_inds, ignore_inds)
    # Compute number of background RoIs to take from this image (guarding
    # against there being fewer than desired)
    bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
    bg_rois_per_this_image = min(bg_rois_per_this_image, bg_inds.size)
    # Sample background regions without replacement
    if bg_inds.size > 0:
        bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image, replace=False)
    # The indices that we're selecting (both fg and bg); fg come first,
    # which the label-clamping below relies on.
    keep_inds = np.append(fg_inds, bg_inds)
    # Select sampled values from various arrays:
    labels = labels[keep_inds]
    # Clamp labels for the background RoIs to 0
    labels[fg_rois_per_this_image:] = 0
    rois = all_rois[keep_inds]
    bbox_target_data = _compute_targets(
        rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], labels)
    # bbox_target_data (1 x H x W x A, 5)
    # bbox_targets <- (1 x H x W x A, K x 4)
    # bbox_inside_weights <- (1 x H x W x A, K x 4)
    bbox_targets, bbox_inside_weights = \
        _get_bbox_regression_labels(bbox_target_data, num_classes)
    return labels, rois, bbox_targets, bbox_inside_weights
def _get_bbox_regression_labels(bbox_target_data, num_classes):
    """Expand compact N x (class, tx, ty, tw, th) targets to 4*K form.

    Only the 4 columns belonging to each row's class receive non-zero
    targets/weights; every other entry stays zero, so the network loss
    only sees the ground-truth class's regression channel.

    Returns:
        bbox_targets (ndarray): N x 4K blob of regression targets
        bbox_inside_weights (ndarray): N x 4K blob of loss weights
    """
    row_classes = bbox_target_data[:, 0]
    bbox_targets = np.zeros((row_classes.size, 4 * num_classes), dtype=np.float32)
    bbox_inside_weights = np.zeros_like(bbox_targets)
    # Background rows (class 0) keep all-zero targets and weights.
    for row in np.where(row_classes > 0)[0]:
        col = 4 * int(row_classes[row])
        bbox_targets[row, col:col + 4] = bbox_target_data[row, 1:]
        bbox_inside_weights[row, col:col + 4] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
    return bbox_targets, bbox_inside_weights
def _compute_targets(ex_rois, gt_rois, labels):
    """Compute bounding-box regression targets for an image.

    Returns an N x 5 float32 array: column 0 is the class label and the
    remaining columns are the (optionally normalized) box deltas.
    """
    assert ex_rois.shape[0] == gt_rois.shape[0]
    assert ex_rois.shape[1] == 4
    assert gt_rois.shape[1] == 4
    deltas = bbox_transform(ex_rois, gt_rois)
    if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
        # Optionally normalize targets by a precomputed mean and stdev
        means = np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS)
        stds = np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS)
        deltas = (deltas - means) / stds
    stacked = np.hstack((labels[:, np.newaxis], deltas))
    return stacked.astype(np.float32, copy=False)
def _jitter_gt_boxes(gt_boxes, jitter=0.05):
    """Jitter the gt boxes before adding them to the RoIs, to make the
    classifier and regressor more robust.

    gt_boxes: (G, 5) [x1, y1, x2, y2, class]
    jitter:   max fractional shift; each edge moves by at most
              +/- jitter/2 of the box width/height.
    """
    boxes = gt_boxes.copy()
    widths = boxes[:, 2] - boxes[:, 0] + 1.0
    heights = boxes[:, 3] - boxes[:, 1] + 1.0
    count = boxes.shape[0]
    # One horizontal and one vertical offset per box, drawn in that order.
    dx = (np.random.rand(count) - 0.5) * jitter * widths
    dy = (np.random.rand(count) - 0.5) * jitter * heights
    boxes[:, 0] += dx
    boxes[:, 2] += dx
    boxes[:, 1] += dy
    boxes[:, 3] += dy
    return boxes
|
|
"""
Reads in current year's Arctic sea ice extent from Sea Ice Index 3 (NSIDC)
and calculates the anomaly from the 1981-2010 median
Website : ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/north/daily/data/
Author : Zachary M. Labe
Date : 5 September 2016
"""
### Import modules
import numpy as np
import urllib.request
import urllib as UL
import datetime
import matplotlib.pyplot as plt
### Directory and time
directoryfigure = './Figures/'
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
currentdoy = now.timetuple().tm_yday
### Load url (NSIDC Sea Ice Index v3 daily northern-hemisphere extent)
url = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/north/daily/data/' \
    'N_seaice_extent_daily_v3.0.csv'
### Read file
raw_data = UL.request.urlopen(url)
dataset = np.genfromtxt(raw_data, skip_header=2,delimiter=',',
                        usecols=[0,1,2,3,4])
print('\nCompleted: Read sea ice data!')
### Set missing data to nan
dataset[np.where(dataset==-9999)] = np.nan
### Variables (CSV columns: year, month, day, extent, missing flag)
year = dataset[:,0]
month = dataset[:,1]
day = dataset[:,2]
### extent in 10^6 km^2 (converted to km^2 below)
ice = dataset[:,3]
missing = dataset[:,4]
### Find current year (2018)
yr2018 = np.where(year == 2018)[0]
sie18 = ice[yr2018]
### Ice Conversion: 10^6 km^2 -> km^2
iceval = sie18 * 1e6
### Printing info
print('\n----- NSIDC Arctic Sea Ice -----')
print('Current Date =', now.strftime("%Y-%m-%d %H:%M"), '\n')
print('SIE Date = %s/%s/%s' % (int(month[-1]),int(day[-1]),int(year[-1])))
print('Current SIE = %s km^2 \n' % (iceval[-1]))
print('1-day change SIE = %s km^2' % (iceval[-1]-iceval[-2]))
print('7-day change SIE = %s km^2 \n' % (iceval[-1]-iceval[-8]))
###########################################################################
###########################################################################
###########################################################################
### Reads in 1981-2010 means
### Load url
url2 = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/north/daily/data/' \
    'N_seaice_extent_climatology_1981-2010_v3.0.csv'
### Read file
raw_data2 = UL.request.urlopen(url2)
dataset2 = np.genfromtxt(raw_data2, skip_header=2,delimiter=',',
                         usecols=[0,1,2,3,4,5,6,7])
### Create variables
doy = dataset2[:,0]
### climatological mean, converted 10^6 km^2 -> km^2
meanice = dataset2[:,1] * 1e6
### standard deviation (left in 10^6 km^2; see the 2-sigma envelope below)
std = dataset2[:,2]
### Percentiles of the 1981-2010 daily distribution
quartile10 = dataset2[:,3]
quartile25 = dataset2[:,4]
quartile50 = dataset2[:,5]
quartile75 = dataset2[:,6]
quartile90 = dataset2[:,7]
### Anomalies
### NOTE(review): climatology indexed at currentdoy-2, presumably data lag
### plus 0-based indexing - confirm the offset is intentional
currentanom = iceval[-1]-meanice[currentdoy-2]
### Printing info
print('Current anomaly = %s km^2 \n' % currentanom)
### Finding select years since 2012
### (2012 and 2016 subtract the full median array while the other years
### drop its last entry - presumably the climatology carries 366 entries
### and non-leap years skip day 366; confirm against the CSV)
yr2012 = np.where(year == 2012)[0]
yr2013 = np.where(year == 2013)[0]
yr2014 = np.where(year == 2014)[0]
yr2015 = np.where(year == 2015)[0]
yr2016 = np.where(year == 2016)[0]
yr2017 = np.where(year == 2017)[0]  # BUGFIX: was missing -> NameError below
### Calculate anomaly from their median
sie12 = ice[yr2012] - quartile50
sie13 = ice[yr2013] - quartile50[:-1]
sie14 = ice[yr2014] - quartile50[:-1]
sie15 = ice[yr2015] - quartile50[:-1]
sie16 = ice[yr2016] - quartile50
sie17 = ice[yr2017] - quartile50[:-1]
### The current (partial) year only covers the days observed so far
sie18 = sie18 - quartile50[:len(sie18)]
### Append years into one continuous 2012-to-today anomaly series
extend5 = np.append(sie12,sie13,axis=0)
extend4 = np.append(extend5,sie14,axis=0)
extend3 = np.append(extend4,sie15,axis=0)
extend2 = np.append(extend3,sie16,axis=0)
extend1 = np.append(extend2,sie17,axis=0)
extend = np.append(extend1,sie18,axis=0)
### Find median to plot
median = np.tile(quartile50,6)
###########################################################################
###########################################################################
###########################################################################
### Create plot (white-on-black style)
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
plt.rc('savefig',facecolor='black')
plt.rc('axes',edgecolor='white')
plt.rc('xtick',color='white')
plt.rc('ytick',color='white')
plt.rc('axes',labelcolor='white')
plt.rc('axes',facecolor='black')
fig = plt.figure()
ax = plt.subplot(111)
### Year labels every 365 days; list() so the labels are a real sequence
### under Python 3, where map() returns a one-shot iterator
xlabels = list(map(str,np.arange(2012,2020,1)))
plt.xticks(np.arange(0,2556,365),xlabels,rotation=0)
ylabels = [r'-5',r'-4',r'-3',r'-2',r'-1',r'\textbf{0.0}',r'1',r'2',r'3',r'4',r'5']
plt.yticks(np.arange(-5,6,1),ylabels)
plt.ylim([-5,5])
plt.xlim([0,2555])
### Adjust axes in time series plots
def adjust_spines(ax, spines):
    """Show only the named spines (pushed 5 pt outward); hide the rest.

    Tick marks are kept only on sides listed in *spines*; the other
    axes lose their ticks entirely.
    """
    for name, spine in ax.spines.items():
        if name not in spines:
            spine.set_color('none')
            continue
        spine.set_position(('outward', 5))
    for side, axis in (('left', ax.yaxis), ('bottom', ax.xaxis)):
        if side in spines:
            axis.set_ticks_position(side)
        else:
            axis.set_ticks([])
    ax.tick_params('both', length=7.5, width=2, which='major')
adjust_spines(ax, ['left','bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_linewidth(2)
ax.spines['left'].set_linewidth(2)
### +/- 2-sigma envelope around the climatological mean (10^6 km^2)
### NOTE(review): upper2std, lower2std and zeroline are computed but never
### used below - likely leftovers; confirm before removing
upper2std = (meanice/1e6)+(std*2)
lower2std = (meanice/1e6)-(std*2)
ax.yaxis.grid(zorder=1,color='w',alpha=0.35)
zeroline = [0]*2191
### Mask anomalies <= 0 so only positive anomalies remain for the blue
### overlay; negative anomalies keep the red of the first bar call
recdiff_masked = np.ma.masked_less_equal(extend, 0)
plt.bar(np.arange(len(extend)),extend,color='r',
        edgecolor='r',zorder=9)
plt.bar(np.arange(len(extend)),recdiff_masked.filled(np.nan),
        color='dodgerblue',edgecolor='dodgerblue',zorder=10)
plt.ylabel(r'\textbf{Extent Anomalies} [$\times$10$^{6}$ km$^2$]',fontsize=13,
           color='darkgrey')
plt.title(r'\textbf{ARCTIC SEA ICE EXTENT ANOMALIES}',
          fontsize=20,color='darkgray')
plt.text(1195,0.25,r'\textbf{1981-2010 Climatology}',fontsize=8,
         rotation=0,ha='center',color='darkgrey')
plt.text(155,0.8,r'$\bf{\rightarrow}$',fontsize=35,rotation=230,ha='center',
         color='dodgerblue')
plt.text(len(extend)+30,-0.27,r'\textbf{Today!}',fontsize=8,rotation=270,ha='center',
         color='r')
plt.text(0.5,-4.45,r'\textbf{DATA:} National Snow \& Ice Data Center, Boulder CO',
         fontsize=6,rotation='horizontal',ha='left',color='darkgrey')
plt.text(0.5,-4.70,r'\textbf{CSV:} ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/',
         fontsize=6,rotation='horizontal',ha='left',color='darkgrey')
plt.text(0.5,-4.95,r'\textbf{GRAPHIC:} Zachary Labe (@ZLabe)',
         fontsize=6,rotation='horizontal',ha='left',color='darkgrey')
fig.subplots_adjust(top=0.91)
plt.savefig(directoryfigure + 'nsidc_sie_median.png',dpi=300)
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
# This little construct ensures we can run even if we have a bad version of
# psutil installed. If so, we'll just skip the test that needs it.
# True when a recent-enough psutil imports cleanly; the CPU/memory
# measurement test below is skipped otherwise.
_HAS_CORRECT_PSUTIL_VERSION = False
try:
  import psutil
  if 'version_info' in dir(psutil):
    # If psutil has any version info at all, it's recent enough.
    _HAS_CORRECT_PSUTIL_VERSION = True
except ImportError:
  # The bound exception ("except ImportError, e") was never used; the
  # bare form is equivalent and also valid Python 3 syntax.
  pass
# Note: pyauto_functional must come before pyauto.
import pyauto_functional
import pyauto
import pyauto_utils
import webrtc_test_base
class WebrtcCallTest(webrtc_test_base.WebrtcTestBase):
  """Test we can set up a WebRTC call and disconnect it.
  Prerequisites: This test case must run on a machine with a webcam, either
  fake or real, and with some kind of audio device. You must make the
  peerconnection_server target before you run.
  The test case will launch a custom binary
  (peerconnection_server) which will allow two WebRTC clients to find each
  other. For more details, see the source code which is available at the site
  http://code.google.com/p/libjingle/source/browse/ (make sure to browse to
  trunk/talk/examples/peerconnection/server).
  """
  def setUp(self):
    pyauto.PyUITest.setUp(self)
    self.StartPeerConnectionServer()
  def tearDown(self):
    self.StopPeerConnectionServer()
    pyauto.PyUITest.tearDown(self)
    # Checked after the base tearDown so problems occurring during browser
    # shutdown are caught as well.
    self.assertEquals('', self.CheckErrorsAndCrashes())
  def _SimpleWebrtcCall(self, request_video, request_audio, duration_seconds=0):
    """Tests we can call and hang up with WebRTC.
    This test exercises pretty much the whole happy-case for the WebRTC
    JavaScript API. Currently, it exercises a normal call setup using the API
    defined at http://dev.w3.org/2011/webrtc/editor/webrtc.html. The API is
    still evolving.
    The test will load the supplied HTML file, which in turn will load different
    javascript files depending on which version of the signaling protocol
    we are running.
    The supplied HTML file will be loaded in two tabs and tell the web
    pages to start up WebRTC, which will acquire video and audio devices on the
    system. This will launch a dialog in Chrome which we click past using the
    automation controller. Then, we will order both tabs to connect the server,
    which will make the two tabs aware of each other. Once that is done we order
    one tab to call the other.
    We make sure that the javascript tells us that the call succeeded, lets it
    run for a while and try to hang up the call after that. We verify video is
    playing by using the video detector.
    Args:
      request_video: Whether to request video.
      request_audio: Whether to request audio.
      duration_seconds: The number of seconds to keep the call up before
        shutting it down.
    """
    self._SetupCall(request_video=request_video, request_audio=request_audio)
    if duration_seconds:
      print 'Call up: sleeping %d seconds...' % duration_seconds
      time.sleep(duration_seconds);
    # The hang-up will automatically propagate to the second tab.
    self.HangUp(from_tab_with_index=0)
    self.WaitUntilHangUpVerified(tab_index=1)
    self.Disconnect(tab_index=0)
    self.Disconnect(tab_index=1)
    # Ensure we didn't miss any errors.
    self.AssertNoFailures(tab_index=0)
    self.AssertNoFailures(tab_index=1)
  def testWebrtcCall(self):
    # Two-way call with both audio and video.
    self.LoadTestPageInTwoTabs()
    self._SimpleWebrtcCall(request_video=True, request_audio=True)
  def testWebrtcVideoOnlyCall(self):
    self.LoadTestPageInTwoTabs()
    self._SimpleWebrtcCall(request_video=True, request_audio=False)
  def testWebrtcAudioOnlyCall(self):
    self.LoadTestPageInTwoTabs()
    self._SimpleWebrtcCall(request_video=False, request_audio=True)
  def testWebrtcJsep01CallAndMeasureCpu20Seconds(self):
    if not _HAS_CORRECT_PSUTIL_VERSION:
      print ('WARNING: Can not run cpu/mem measurements with this version of '
             'psutil. You must have at least psutil 0.4.1 installed for the '
             'version of python you are running this test with.')
      return
    self.LoadTestPageInTwoTabs(test_page='webrtc_jsep01_test.html')
    # Prepare CPU measurements.
    renderer_process = self._GetChromeRendererProcess(tab_index=0)
    # First reading primes psutil's counters; the post-call reading below
    # is relative to this point.
    renderer_process.get_cpu_percent()
    self._SimpleWebrtcCall(request_video=True,
                           request_audio=True,
                           duration_seconds=20)
    cpu_usage = renderer_process.get_cpu_percent(interval=0)
    mem_usage_bytes = renderer_process.get_memory_info()[0]
    mem_usage_kb = float(mem_usage_bytes) / 1024
    pyauto_utils.PrintPerfResult('cpu', 'jsep01_call', cpu_usage, '%')
    pyauto_utils.PrintPerfResult('memory', 'jsep01_call', mem_usage_kb, 'KiB')
  def testLocalPreview(self):
    """Brings up a local preview and ensures video is playing.
    This test will launch a window with a single tab and run a getUserMedia call
    which will give us access to the webcam and microphone. Then the javascript
    code will hook up the webcam data to the local-view video tag. We will
    detect video in that tag using the video detector, and if we see video
    moving the test passes.
    """
    self.LoadTestPageInOneTab()
    self.assertEquals('ok-got-stream', self.GetUserMedia(tab_index=0))
    self._StartDetectingVideo(tab_index=0, video_element='local-view')
    self._WaitForVideo(tab_index=0, expect_playing=True)
  def testHandlesNewGetUserMediaRequestSeparately(self):
    """Ensures WebRTC doesn't allow new requests to piggy-back on old ones."""
    self.LoadTestPageInTwoTabs()
    self.GetUserMedia(tab_index=0)
    self.GetUserMedia(tab_index=1)
    self.Connect("user_1", tab_index=0)
    self.Connect("user_2", tab_index=1)
    self.CreatePeerConnection(tab_index=0)
    self.AddUserMediaLocalStream(tab_index=0)
    self.EstablishCall(from_tab_with_index=0, to_tab_with_index=1)
    # With a call already up, fresh getUserMedia requests must trigger
    # their own permission prompt instead of reusing the earlier grant.
    self.assertEquals('failed-with-error-PERMISSION_DENIED',
                      self.GetUserMedia(tab_index=0, action='cancel'))
    self.assertEquals('failed-with-error-PERMISSION_DENIED',
                      self.GetUserMedia(tab_index=0, action='dismiss'))
  def _SetupCall(self, request_video, request_audio):
    """Gets user media and establishes a call.
    Assumes that two tabs are already opened with a suitable test page.
    Args:
      request_video: Whether to request video.
      request_audio: Whether to request audio.
    """
    self.assertEquals('ok-got-stream', self.GetUserMedia(
        tab_index=0, request_video=request_video, request_audio=request_audio))
    self.assertEquals('ok-got-stream', self.GetUserMedia(
        tab_index=1, request_video=request_video, request_audio=request_audio))
    self.Connect('user_1', tab_index=0)
    self.Connect('user_2', tab_index=1)
    self.CreatePeerConnection(tab_index=0)
    self.AddUserMediaLocalStream(tab_index=0)
    self.EstablishCall(from_tab_with_index=0, to_tab_with_index=1)
    if request_video:
      # Only verify video flow when video was actually requested.
      self._StartDetectingVideo(tab_index=0, video_element='remote-view')
      self._StartDetectingVideo(tab_index=1, video_element='remote-view')
      self._WaitForVideo(tab_index=0, expect_playing=True)
      self._WaitForVideo(tab_index=1, expect_playing=True)
  def _StartDetectingVideo(self, tab_index, video_element):
    self.assertEquals('ok-started', self.ExecuteJavascript(
        'startDetection("%s", "frame-buffer", 320, 240)' % video_element,
        tab_index=tab_index));
  def _WaitForVideo(self, tab_index, expect_playing):
    # TODO(phoglund): Remove this hack if we manage to get a more stable Linux
    # bot to run these tests.
    if self.IsLinux():
      print "Linux; pretending to wait for video..."
      time.sleep(1)
      return
    expect_retval='video-playing' if expect_playing else 'video-not-playing'
    video_playing = self.WaitUntil(
        function=lambda: self.ExecuteJavascript('isVideoPlaying()',
                                                tab_index=tab_index),
        expect_retval=expect_retval)
    self.assertTrue(video_playing,
                    msg= 'Timed out while waiting for isVideoPlaying to ' +
                         'return ' + expect_retval + '.')
  def _GetChromeRendererProcess(self, tab_index):
    """Returns the Chrome renderer process as a psutil process wrapper."""
    tab_info = self.GetBrowserInfo()['windows'][0]['tabs'][tab_index]
    renderer_id = tab_info['renderer_pid']
    if not renderer_id:
      self.fail('Can not find the tab renderer process.')
    return psutil.Process(renderer_id)
if __name__ == '__main__':
  pyauto_functional.Main()  # run this file via pyauto's test harness
|
|
import time
import os
import numpy
import sys
# OrderedDict is a feature of 2.7 and beyond only
from collections import OrderedDict
from astropy.io import fits as pyfits
from array import array
import pdb
import ctypes
import ROOT
ROOT.PyConfig.IgnoreCommandLineOptions = True
from donutlib.donutengine import donutengine
from donutlib.donututil import loadImage
from donutlib.donututil import calcStarting
from donutlib.decamutil import decaminfo
class donutfit(object):
""" donutfit is a class used to fit donuts, using donutengine and MINUIT, for the DES experiment
Aaron Roodman (C) SLAC National Accelerator Laboratory, Stanford University 2012.
"""
    def __init__(self,**inputDict):
        """Build the donut fit machinery: donutengine plus a configured TMinuit.

        inputDict entries override the defaults in self.paramDict
        (e.g. nZernikeTerms, nFits, fixedParamArray1, printLevel, gain).
        This runs once; individual donuts are then fit via setupFit().
        """
        # init contains all initializations which are done only once for all fits
        # parameters in fixParamArray1 are nEle,rzero,bkgd,Z2,Z3,Z4,....Z11
        self.paramDict = {"nZernikeTerms":11,
                          "nFits":1,
                          "fixedParamArray1":[0,1,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1], #need defaults up to quadrefoil
                          "debugFlag":False,
                          "outputWavefront":False,
                          "outputDiff":True,
                          "outputChi2":False,
                          "printLevel":1,
                          "maxIterations":1000,
                          "calcRzeroDerivative":True,
                          "wavefrontMap":None,
                          "doGridFit":False,
                          "spacing":64,
                          "gain":1.0}
        # search for key in inputDict, change defaults
        self.paramDict.update(inputDict)
        # setup the fit engine
        self.gFitFunc = donutengine(**self.paramDict)
        # need dummy versions before calling self.chisq
        self.imgarray = numpy.zeros(1)
        self.weight = numpy.ones(1)
        self.sigmasq = numpy.ones(1)
        # get decam info
        self.decamInfo = decaminfo()
        # setup MINUIT; chisq is the FCN that MIGRAD minimizes
        self.gMinuit = ROOT.TMinuit(self.gFitFunc.npar)
        self.gMinuit.SetFCN( self.chisq )
        # arglist is for the parameters in Minuit commands
        arglist = array( 'd', 10*[0.] )
        ierflg =ctypes.c_int(1982)#L ROOT.Long(1982)
        # set the definition of 1sigma
        arglist[0] = 1.0
        self.gMinuit.mnexcm( "SET ERR", arglist, 1, ierflg )
        # turn off Warnings
        arglist[0] = 0
        self.gMinuit.mnexcm("SET NOWARNINGS", arglist,0,ierflg)
        # set printlevel
        arglist[0] = self.paramDict["printLevel"]
        self.gMinuit.mnexcm("SET PRINTOUT", arglist,1,ierflg)
        # do initial setup of Minuit parameters
        # status/limit arrays for Minuit parameters
        self.startingParam = numpy.zeros(self.gFitFunc.npar)
        self.errorParam = numpy.ones(self.gFitFunc.npar)
        self.loParam = numpy.zeros(self.gFitFunc.npar)
        self.hiParam = numpy.zeros(self.gFitFunc.npar)
        self.paramStatusArray = numpy.zeros(self.gFitFunc.npar) # store =0 Floating, =1 Fixed
        # Set starting values and step sizes for parameters
        # (note that one can redefine the parameters, so this method can be called multiple times)
        for ipar in range(self.gFitFunc.npar):
            self.gMinuit.DefineParameter(ipar,self.gFitFunc.parNames[ipar],self.startingParam[ipar],self.errorParam[ipar],self.loParam[ipar],self.hiParam[ipar])
        # get wavefrontMap object
        self.wavefrontMap = self.paramDict['wavefrontMap']
def setupFit(self,**inputFitDict):
""" setup the fit, and do the fit, for a new Donut
"""
# these defaults come from Zemax
# and we use the entry keyed by None as a default if this is not from DECam
# Fixed FN's Z4 signs on 10/4/2012 AJR
# still need to check the signs of the Trefoil terms...
# Changed default zern4 to be 11.0, since fits prefer to start high 10/5/2012 AJR
#
inputZernikeDict = {'None':[0.0,0.0,0.0],
"FS1":[0.0,0.0,11.0,0.0,0.0,0.0,0.0,0.20,-0.17,-0.08],
"FS2":[0.0,0.0,-11.0,0.0,0.0,0.0,0.0,0.26,-0.01,-0.13],
"FS3":[0.0,0.0,11.0,0.0,0.0,0.0,0.0,0.05,0.25,-0.11],
"FS4":[0.0,0.0,-11.0,0.0,0.0,0.0,0.0,-0.05,0.25,-0.14],
"FN1":[0.0,0.0,-11.0,0.0,0.0,0.0,0.0,0.20,0.17,-0.08],
"FN2":[0.0,0.0,11.0,0.0,0.0,0.0,0.0,0.26,0.01,-0.13],
"FN3":[0.0,0.0,-11.0,0.0,0.0,0.0,0.0,0.05,-0.25,-0.11],
"FN4":[0.0,0.0,11.0,0.0,0.0,0.0,0.0,-0.05,-0.25,-0.14] }
# default Dictionary
self.fitDict = {"inputImageArray":None,
"inputFile":"",
"outputPrefix":"test",
"inputZernikeDict":inputZernikeDict,
"inputrzero":None,
"inputnEle":None,
"inputbkgd":None,
"inputZ4max":100.0} #Sept 22, 2014 change default back to 100
# update Dictionary with inputs
self.fitDict.update(inputFitDict)
# if desired print dict
if self.paramDict["printLevel"]>=2:
print(self.fitDict)
# load image either from a file or from an input array
if self.fitDict["inputImageArray"] ==None:
# check for fits in file name
fileName = self.fitDict["inputFile"]
if fileName[-5:] != ".fits" :
fileName = fileName + ".fits"
# get the input file header
hdulist = pyfits.open(fileName)
iextension = 0 # set to other values for test usage
self.inputHeader = hdulist[iextension].header
# get the extension name from the header
if list(self.inputHeader.keys()).count("EXTNAME")>0:
extname = self.inputHeader["EXTNAME"]
if extname == "":
extname = 'None'
else:
extname = 'None'
# also get the IX,IY values from the header
if list(self.inputHeader.keys()).count("IX")>0:
ix = self.inputHeader["IX"]
else:
ix = 0.
if list(self.inputHeader.keys()).count("IY")>0:
iy = self.inputHeader["IY"]
else:
iy = 0.
# calculate position in DECam focal plane
# if this info isn't in the header, just set values to 0,0
if extname != 'None':
try:
xDECam,yDECam = self.decamInfo.getPosition(extname,ix,iy)
except:
xDECam = 0.
yDECam = 0.
else:
xDECam = 0.
yDECam = 0.
# load the Image AJR 9/14/2012 - now assume we are only running on postage stamps - remove ability to
# work on full image, never use that anymore...
### self.imgarray = hdulist[0].data.copy() this caused bugs with some fits files
gain = self.paramDict["gain"] #convert to Nele from Nadu
self.imgarray = gain * hdulist[iextension].data.astype(numpy.float64)
constantError2 = 7.1 * 7.1
self.sigmasq = self.imgarray + constantError2
self.weight = 1.0/self.sigmasq
# close file
hdulist.close()
else:
self.inputHeader = {}
extname = 'None'
self.imgarray = inputImageArray.astype(numpy.float64)
self.weight = 1.0/numpy.sqrt(self.imgarray)
# setup starting Zernike array - start with the inputZernikeDict keyed by extension name
# if wavefrontMap exists, then use that to fill all Zernike coefficients from zern5 to nZernikeSize (zern5 is iZ=3)
# take this from the inputZernikeDict keyed by the value of extname in the header
inputZernikeArray = self.fitDict["inputZernikeDict"][extname]
# fill startingZernikeArray from the inputZernikeArray values
startingZernikeArray = numpy.zeros(self.gFitFunc.nZernikeSize)
for iZ in range(len(inputZernikeArray)):
# starting from 1st used Zernike term
startingZernikeArray[iZ] = inputZernikeArray[iZ]
# if the wavefrontMap is present, overwrite the zernike terms after Focus (starting from 5 and up to nZernikeTerms) to values from Zemax built map
if self.wavefrontMap != None:
anotherZernikeArray = self.wavefrontMap.get(xDECam,yDECam,nZernikeFirst=5,nZernikeLast=self.paramDict["nZernikeTerms"])
for iZ in range(5,self.paramDict["nZernikeTerms"]+1):
startingZernikeArray[iZ-2] = anotherZernikeArray[iZ-5]
self.startingParam[self.gFitFunc.ipar_ZernikeFirst:self.gFitFunc.ipar_ZernikeLast+1] = startingZernikeArray
startingZernikeError = 0.1 * numpy.ones(self.gFitFunc.nZernikeSize)
self.errorParam[self.gFitFunc.ipar_ZernikeFirst:self.gFitFunc.ipar_ZernikeLast+1] = startingZernikeError
# set sign of Focus term
# 3/25/2014 make the maximum Zern4 a setable parameter (for big donuts!)
if numpy.sign(startingZernikeArray[2])==1:
self.loParam[self.gFitFunc.ipar_ZernikeFirst+2] = 0.0
self.hiParam[self.gFitFunc.ipar_ZernikeFirst+2] = self.fitDict["inputZ4max"]
else:
self.loParam[self.gFitFunc.ipar_ZernikeFirst+2] = -1 * self.fitDict["inputZ4max"]
self.hiParam[self.gFitFunc.ipar_ZernikeFirst+2] = 0.0
# set range for x,y tilts, to prevent runaway values
# offset = 4 lambda F * zern_2 or zern_3 = 8.4 micron/ unit of z2,3
# limit to +- 15 pixels = +-27 units, make it +-30
self.loParam[self.gFitFunc.ipar_ZernikeFirst+0] = -30.0
self.hiParam[self.gFitFunc.ipar_ZernikeFirst+0] = 30.0
self.loParam[self.gFitFunc.ipar_ZernikeFirst+1] = -30.0
self.hiParam[self.gFitFunc.ipar_ZernikeFirst+1] = 30.0
# calculate staring values for nele and bkgd or take from paramDict
background,nelectrons = calcStarting(self.imgarray,printLevel=self.paramDict["printLevel"])
inputnEle = self.fitDict["inputnEle"]
if inputnEle!=None:
self.startingParam[self.gFitFunc.ipar_nEle] = inputnEle
self.errorParam[self.gFitFunc.ipar_nEle] = numpy.sqrt(inputnEle)
else:
self.startingParam[self.gFitFunc.ipar_nEle] = nelectrons
self.errorParam[self.gFitFunc.ipar_nEle] = numpy.sqrt(nelectrons)
self.loParam[self.gFitFunc.ipar_nEle] = 0.2*self.startingParam[self.gFitFunc.ipar_nEle]
self.hiParam[self.gFitFunc.ipar_nEle] = 4.0*self.startingParam[self.gFitFunc.ipar_nEle]
inputbkgd = self.fitDict["inputbkgd"]
if inputbkgd!=None:
self.startingParam[self.gFitFunc.ipar_bkgd] = inputbkgd
self.errorParam[self.gFitFunc.ipar_bkgd] = 1.0
self.loParam[self.gFitFunc.ipar_bkgd] = 0.0
self.hiParam[self.gFitFunc.ipar_bkgd] = inputbkgd*10.0
else:
self.startingParam[self.gFitFunc.ipar_bkgd] = background
self.errorParam[self.gFitFunc.ipar_bkgd] = 1.0
self.loParam[self.gFitFunc.ipar_bkgd] = 0.0
self.hiParam[self.gFitFunc.ipar_bkgd] = background*10.0
# rzero parameter, get from argument first then header or otherwise set to 0.2
# March 25,2014 increase limit on rzero from 0.25 to 0.30
# temporary - increase rzero limit to 0.50
inputrzero = self.fitDict["inputrzero"]
if inputrzero == None:
if list(self.inputHeader.keys()).count("RZEROIN")>0:
inputrzero = self.inputHeader["RZEROIN"]
else:
inputrzero = 0.125
self.startingParam[self.gFitFunc.ipar_rzero] = inputrzero
self.errorParam[self.gFitFunc.ipar_rzero] = 0.01
self.loParam[self.gFitFunc.ipar_rzero] = 0.05
self.hiParam[self.gFitFunc.ipar_rzero] = 0.50
# Set starting values and step sizes for parameters
# (note that one can redefine the parameters, so this method can be called multiple times)
for ipar in range(self.gFitFunc.npar):
self.gMinuit.DefineParameter(ipar,self.gFitFunc.parNames[ipar],self.startingParam[ipar],self.errorParam[ipar],self.loParam[ipar],self.hiParam[ipar])
# do the Fit, and repeat as desired with different parameters fixed
postfix = {0:"first",1:"second",2:"third",3:"fourth"}
for iFit in range(self.paramDict["nFits"]):
# fix parameters as desired
fixParamArray = self.paramDict["fixedParamArray"+str(iFit+1)]
self.updateFit(fixParamArray,xDECam,yDECam)
self.doFit()
outputDict = self.outFit(postfix[iFit])
# if desired do the Wavefront fit next
if self.paramDict["doGridFit"]:
self.initWavefrontGrid(self.paramDict['spacing'])
self.initWavefrontFit()
self.doWavefrontFit()
self.outWavefrontFit("gridfit")
# return last output Dictionary
return outputDict
def updateFit(self,fixParamArray,xDECam,yDECam):
# fix parameters as desired
for ipar in range(self.gFitFunc.npar):
if fixParamArray[ipar] != self.paramStatusArray[ipar]:
if fixParamArray[ipar]==0:
self.gMinuit.Release(ipar)
elif fixParamArray[ipar]==1:
self.gMinuit.FixParameter(ipar)
self.paramStatusArray[ipar] = fixParamArray[ipar]
# set x,y DECam values
self.gFitFunc.setXYDECam(xDECam,yDECam)
# reset counters
self.gFitFunc.nCallsCalcAll = 0
self.gFitFunc.nCallsCalcDerivative = 0
    def chisq(self,npar, gin, f, par, iflag ):
        """MINUIT FCN: fill f[0] with the chi^2 of the model vs. the image.

        The signature follows TMinuit's FCN convention (npar, gin, f, par,
        iflag); when iflag==2 the analytic gradient is written into gin.
        """
        # convert par to a numpy array
        parArr = numpy.zeros(self.gFitFunc.npar)
        for ipar in range(self.gFitFunc.npar):
            parArr[ipar] = par[ipar]
        # call donutengine to calculate image
        self.gFitFunc.calcAll(parArr)
        # compare to calculated image; per-pixel pull^2 uses self.sigmasq
        diff = self.imgarray - self.gFitFunc.getvImage()
        self.pullsq = diff*diff/self.sigmasq
        chisquared = self.pullsq.sum()
        # printout
        if self.paramDict["printLevel"]>=2:
            print('donutfit: Chi2 = ',chisquared)
        # save parameters for next iteration
        self.gFitFunc.savePar()
        # return result
        f[0] = chisquared
        if iflag==2 :
            # not currently called in calcAll
            self.gFitFunc.calcDerivatives(self.imgarray,self.weight)
            dChi2dpar = self.gFitFunc.getDerivatives()
            gin.SetSize(self.gFitFunc.npar) # need to handle root bug
            #
            # fill gin with Derivatives
            #
            for i in range(self.gFitFunc.npar):
                gin[i] = dChi2dpar[i]
        #print "donutfit.chisq: ",iflag,f[0]
        #print " ",parArr
        #if iflag==2:
        #    print " ",dChi2dpar
def doFit(self):
# arglist is for the parameters in Minuit commands
arglist = array( 'd', 10*[0.] )
ierflg = ctypes.c_int(1982) #L ROOT.Long
# tell Minuit we have derivatives, don't check anymore as long as rzero is fixed
if self.paramStatusArray[self.gFitFunc.ipar_rzero]==1 :
self.gFitFunc.setCalcRzeroDerivativeFalse()
arglist[0] = 1 # =1 means never check the gradient
self.gMinuit.mnexcm( "SET GRADIENT", arglist, 1, ierflg )
else:
self.gFitFunc.setCalcRzeroDerivativeTrue()
arglist[0] = 1 # =1 means never check the gradient (March 12, 2015, rzero derivative code implemented)
# arglist[0] = 0 # =0 means to check gradient each time
self.gMinuit.mnexcm( "SET GRADIENT", arglist, 1, ierflg )
# tell Minuit to use strategy for fastest fits
arglist[0] = 0 # was 1
self.gMinuit.mnexcm( "SET STRATEGY", arglist, 1, ierflg )
# start timer
self.startingtime = time.clock()
# Now ready for minimization step
#self.gMinuit.SetMaxIterations(self.paramDict["maxIterations"])
#self.gMinuit.Migrad()
arglist[0] = self.paramDict["maxIterations"]
arglist[1] = 0.1 # tolerance, default is 0.1
self.gMinuit.mnexcm( "MIGRAD", arglist, 2, ierflg )
# done, check elapsed time
firsttime = time.clock()
self.deltatime = firsttime - self.startingtime
if self.paramDict["printLevel"]>=1:
print('donutfit: Elapsed time fit = ',self.deltatime)
# number of calls
if self.paramDict["printLevel"]>=1:
print('donutfit: Number of CalcAll calls = ',self.gFitFunc.nCallsCalcAll)
print('donutfit: Number of CalcDerivative calls = ',self.gFitFunc.nCallsCalcDerivative)
def outFit(self,postfix,identifier=""):
# get more fit details from MINUIT
amin, edm, errdef = ctypes.c_double(0.18), ctypes.c_double(0.19), ctypes.c_double(0.20)
nvpar, nparx, icstat = ctypes.c_int(1983), ctypes.c_int(1984), ctypes.c_int(1985)
self.gMinuit.mnstat( amin, edm, errdef, nvpar, nparx, icstat )
dof = pow(self.gFitFunc._nPixels,2) - nvpar.value
if self.paramDict["printLevel"]>=1:
mytxt = "amin = %.3f, edm = %.3f, effdef = %.3f, nvpar = %.3f, nparx = %.3f, icstat = %.3f " % (amin.value,edm.value,errdef.value,nvpar.value,nparx.value,icstat.value)
print('donutfit: ',mytxt)
# get fit values and errors
aVal = ctypes.c_double(0.21)
errVal = ctypes.c_double(0.22)
self.paramArray = numpy.zeros(self.gFitFunc.npar)
self.paramErrArray = numpy.zeros(self.gFitFunc.npar)
for ipar in range(self.gFitFunc.npar):
self.gMinuit.GetParameter(ipar,aVal,errVal)
self.paramArray[ipar] = aVal.value
if errVal.value < 1e9 :
self.paramErrArray[ipar] = errVal.value
else:
self.paramErrArray[ipar] = 0.0
# printout parameters in a convenient format
if self.paramDict["printLevel"]>=1:
print(""" "[ """, end=' ')
for ipar in range(self.gFitFunc.ipar_ZernikeFirst,self.gFitFunc.ipar_ZernikeLast):
print(self.paramArray[ipar],",", end=' ')
print(self.paramArray[self.gFitFunc.ipar_ZernikeLast],""" ]" """)
#copy input header information from input file here except for Standard header stuff
stdHeaderDict= {'SIMPLE':0,'BITPIX':0,'NAXIS':0,'NAXIS1':0,'NAXIS2':0,'EXTEND':0}
try:
if sys.version_info.minor>=7 or sys.version_info.major >= 3:
outputHeaderDict = OrderedDict()
except:
outputHeaderDict = {}
for key in list(self.inputHeader.keys()):
if not list(stdHeaderDict.keys()).count(key)>0:
outputHeaderDict[key] = self.inputHeader[key]
# fill output Dictionary
try:
if sys.version_info.minor>=7 or sys.version_info.major >= 3:
outputDict = OrderedDict()
except:
outputDict = {}
outputDict["CHI2"] = float(amin.value)
outputDict["DOF"] = int(dof)
outputDict["FITSTAT"] = int(icstat.value)
outputDict["CLKTIME"] = self.deltatime
outputDict["NCALCALL"] = self.gFitFunc.nCallsCalcAll
outputDict["NCALCDER"] = self.gFitFunc.nCallsCalcDerivative
outputDict["DOF"] = dof
for ipar in range(self.gFitFunc.npar):
outputDict[self.gFitFunc.parNames[ipar]] = float(self.paramArray[ipar])
for ipar in range(self.gFitFunc.npar):
outputDict[self.gFitFunc.parNames[ipar]+"E"] = float(self.paramErrArray[ipar])
# make a single output file, with multiple extensions
# Extension 1: Calculated Image
# Extension 2: Original Image
# Extension 3: Difference (if desired)
# Extension 4: Chi2 (if desired)
# Extension 5: Wavefront (if desired)
hduListOutput = pyfits.HDUList()
primaryOutput = pyfits.PrimaryHDU()
primaryHeader = primaryOutput.header
# fill primary header both with input Header and fit results
for key in outputHeaderDict:
primaryHeader[key] = outputHeaderDict[key]
for key in outputDict:
primaryHeader[key] = outputDict[key]
hduListOutput.append(primaryOutput)
# calculated Donut
calcHdu = pyfits.ImageHDU(self.gFitFunc.getvImage())
calcHeader = calcHdu.header
for key in outputHeaderDict:
calcHeader[key] = outputHeaderDict[key]
for key in outputDict:
calcHeader[key] = outputDict[key]
hduListOutput.append(calcHdu)
# original image
imageHdu = pyfits.ImageHDU(self.imgarray)
imageHeader = imageHdu.header
for key in outputHeaderDict:
imageHeader[key] = outputHeaderDict[key]
hduListOutput.append(imageHdu)
# diff Donut - Calc
if self.paramDict["outputDiff"]:
diffHdu = pyfits.ImageHDU(self.imgarray-self.gFitFunc.getvImage())
diffHeader = diffHdu.header
for key in outputHeaderDict:
imageHeader[key] = outputHeaderDict[key]
hduListOutput.append(diffHdu)
# Chi2 Donut-Calc
if self.paramDict["outputChi2"]:
chi2Hdu = pyfits.ImageHDU(self.pullsq)
chi2Header = chi2Hdu.header
for key in outputHeaderDict:
imageHeader[key] = outputHeaderDict[key]
hduListOutput.append(chi2Hdu)
# Wavefront map
if self.paramDict["outputWavefront"]:
waveHdu = pyfits.ImageHDU(self.gFitFunc.getvPupilMask()*self.gFitFunc.getvPupilWaveZernike())
waveHeader = waveHdu.header
hduListOutput.append(waveHdu)
# file names for output are
# outputPrefix + identifier
if identifier!="" :
outName = self.fitDict["outputPrefix"] + "." + identifier + "." + postfix
else:
outName = self.fitDict["outputPrefix"] + "." + postfix
# write out fits file
outFile = outName + ".donut.fits"
if self.paramDict["printLevel"]>=1:
hduListOutput.info()
hduListOutput.writeto(outFile,overwrite=True)
# add info from input Header for return
outputDict.update(outputHeaderDict)
resultDict = {}
resultDict.update(outputDict)
return resultDict
    def initWavefrontGrid(self,spacing):
        # thin delegate: configure the wavefront-grid spacing in the fit engine
        self.gFitFunc.initWavefrontGrid(spacing)
def initWavefrontFit(self):
# make a new gMinit object
nGrid = self.gFitFunc.getnGrid()
self.wMinuit = ROOT.TMinuit(nGrid)
self.wMinuit.SetFCN(self.wavechisq)
# arglist is for the parameters in Minuit commands
arglist = array( 'd', 10*[0.] )
ierflg =ctypes.c_int(1982)#L ROOT.Long(1982)
# set the definition of 1sigma
arglist[0] = 1.0
self.wMinuit.mnexcm( "SET ERR", arglist, 1, ierflg )
# turn off Warnings
arglist[0] = 0
self.wMinuit.mnexcm("SET NOWARNINGS", arglist,0,ierflg)
# set printlevel
arglist[0] = self.paramDict["printLevel"]
self.wMinuit.mnexcm("SET PRINTOUT", arglist,1,ierflg)
# do initial setup of Minuit parameters
startingwParam = numpy.zeros(nGrid)
errorwParam = 0.01 * numpy.ones(nGrid)
maxwavevalue = 0.5
lowParam = -1.0 * numpy.ones(nGrid) * maxwavevalue
hiwParam = numpy.ones(nGrid) * maxwavevalue
wparamStatusArray = numpy.zeros(nGrid) # store =0 Floating, =1 Fixed
# Set starting values and step sizes for parameters
wparamNames = []
for ipar in range(nGrid):
wparamNames.append("Grid_%d" % (ipar))
self.wMinuit.DefineParameter(ipar,"Grid_%d" % (ipar), startingwParam[ipar], errorwParam[ipar],lowParam[ipar],hiwParam[ipar])
self.nCallsWavefit = 0
def wavechisq(self,npar,gin,f,par,iflag):
# convert par to a numpy array
nGrid = self.gFitFunc.getnGrid()
parArr = numpy.zeros(nGrid) # convert npar to an integer...
for ipar in range(nGrid):
parArr[ipar] = par[ipar]
# call donutengine to calculate image
self.gFitFunc.nCallsCalcAll +=1
self.gFitFunc.makeWavefrontGrid(parArr) # fill deltaWFM from Minuit parameters
self.gFitFunc.calcAll(self.gFitFunc.getParCurrent()) # calculate Image ### BUG - needs fixing to use most recent regular parameters from regular fit
# write out the image here and quit...
if False:
vImage = self.gFitFunc.getvImage()
ftemp = open('image_%d.npy' % self.gFitFunc.nCallsCalcAll, 'wb')
numpy.save(ftemp,vImage)
pdb.set_trace()
# compare to calculated image
diff = self.imgarray - self.gFitFunc.getvImage()
self.pullsq = diff*diff/self.sigmasq
chisquared = self.pullsq.sum()
# printout
#if self.paramDict["printLevel"]>=2:
print('donutfit: wavechisq Chi2 = ',chisquared)
# return result
f[0] = chisquared
# Derivative calculation!
if iflag==2 :
# not currently called in calcAll
self.gFitFunc.calcGridDerivatives(self.imgarray,self.weight)
dChi2dgrid = self.gFitFunc.getGridDerivatives()
gin.SetSize(nGrid) # need to handle root bug
#
# fill gin with Derivatives
#
print("CalcGridDerivatives:")
for i in range(nGrid):
gin[i] = dChi2dgrid[i]
#print(i,gin[i])
def doWavefrontFit(self):
# arglist is for the parameters in Minuit commands
arglist = array( 'd', 10*[0.] )
ierflg = ctypes.c_int(1982) #L ROOT.Long
# tell Minuit we have derivatives, don't check anymore as long as rzero is fixed
arglist[0] = 1 # =1 means never check the gradient
#arglist[0] = 0 # =0 means to check gradient each time
self.wMinuit.mnexcm( "SET GRADIENT", arglist, 1, ierflg )
# tell Minuit to use strategy for fastest fits
arglist[0] = 0 # was 1
self.wMinuit.mnexcm( "SET STRATEGY", arglist, 1, ierflg )
# start timer
self.startingtime = time.clock()
# Now ready for minimization step
self.wMinuit.SetMaxIterations(self.paramDict["maxIterations"])
self.wMinuit.Migrad()
# done, check elapsed time
firsttime = time.clock()
self.deltatime = firsttime - self.startingtime
if self.paramDict["printLevel"]>=1:
print('donutfit: Elapsed time fit = ',self.deltatime)
# number of calls
if self.paramDict["printLevel"]>=1:
print('donutfit wavefront: Number of CalcAll calls = ',self.gFitFunc.nCallsCalcAll)
print('donutfit wavefront: Number of CalcDerivative calls = ',self.gFitFunc.nCallsCalcDerivative)
def outWavefrontFit(self,postfix,identifier=""):
# get more fit details from MINUIT
amin, edm, errdef = ctypes.c_double(0.18), ctypes.c_double(0.19), ctypes.c_double(0.20)
nvpar, nparx, icstat = ctypes.c_int(1983), ctypes.c_int(1984), ctypes.c_int(1985)
self.wMinuit.mnstat( amin, edm, errdef, nvpar, nparx, icstat )
dof = pow(self.gFitFunc._nPixels,2) - nvpar.value
if self.paramDict["printLevel"]>=1:
mytxt = "amin = %.3f, edm = %.3f, effdef = %.3f, nvpar = %.3f, nparx = %.3f, icstat = %.3f " % (amin.value,edm.value,errdef.value,nvpar.value,nparx.value,icstat.value)
print('donutfit wavefront: ',mytxt)
# get fit values and errors
aVal = ctypes.c_double(0.21)
errVal = ctypes.c_double(0.22)
self.paramArray = numpy.zeros(self.gFitFunc.getnGrid())
self.paramErrArray = numpy.zeros(self.gFitFunc.getnGrid())
nGrid = self.gFitFunc.getnGrid()
for ipar in range(nGrid):
self.wMinuit.GetParameter(ipar,aVal,errVal)
self.paramArray[ipar] = aVal.value
if errVal.value < 1e9 :
self.paramErrArray[ipar] = errVal.value
else:
self.paramErrArray[ipar] = 0.0
#copy input header information from input file here except for Standard header stuff
stdHeaderDict= {'SIMPLE':0,'BITPIX':0,'NAXIS':0,'NAXIS1':0,'NAXIS2':0,'EXTEND':0}
outputHeaderDict = OrderedDict()
for key in list(self.inputHeader.keys()):
if not list(stdHeaderDict.keys()).count(key)>0:
outputHeaderDict[key] = self.inputHeader[key]
# fill output Dictionary
try:
if sys.version_info.minor>=7 or sys.version_info.major >= 3:
outputDict = OrderedDict()
except:
outputDict = {}
outputDict["CHI2"] = float(amin.value)
outputDict["DOF"] = int(dof)
outputDict["FITSTAT"] = int(icstat.value)
outputDict["CLKTIME"] = self.deltatime
outputDict["NCALCALL"] = self.gFitFunc.nCallsCalcAll
outputDict["NCALCDER"] = self.gFitFunc.nCallsCalcDerivative
outputDict["DOF"] = dof
# do I use this anywhere?
#for ipar in range(nGrid):
# outputDict[self.gFitFunc.parNames[ipar]] = float(self.paramArray[ipar])
#for ipar in range(nGrid):
# outputDict[self.gFitFunc.parNames[ipar]+"E"] = float(self.paramErrArray[ipar])
# make a single output file, with multiple extensions
# Extension 1: Calculated Image
# Extension 2: Original Image
# Extension 3: Difference (if desired)
# Extension 4: Chi2 (if desired)
# Extension 5: Wavefront (if desired)
hduListOutput = pyfits.HDUList()
primaryOutput = pyfits.PrimaryHDU()
primaryHeader = primaryOutput.header
# fill primary header both with input Header and fit results
for key in outputHeaderDict:
primaryHeader[key] = outputHeaderDict[key]
for key in outputDict:
primaryHeader[key] = outputDict[key]
hduListOutput.append(primaryOutput)
# calculated Donut
calcHdu = pyfits.ImageHDU(self.gFitFunc.getvImage())
calcHeader = calcHdu.header
for key in outputHeaderDict:
calcHeader[key] = outputHeaderDict[key]
for key in outputDict:
calcHeader[key] = outputDict[key]
hduListOutput.append(calcHdu)
# original image
imageHdu = pyfits.ImageHDU(self.imgarray)
imageHeader = imageHdu.header
for key in outputHeaderDict:
imageHeader[key] = outputHeaderDict[key]
hduListOutput.append(imageHdu)
# diff Donut - Calc
if self.paramDict["outputDiff"]:
diffHdu = pyfits.ImageHDU(self.imgarray-self.gFitFunc.getvImage())
diffHeader = diffHdu.header
for key in outputHeaderDict:
imageHeader[key] = outputHeaderDict[key]
hduListOutput.append(diffHdu)
# Chi2 Donut-Calc
if self.paramDict["outputChi2"]:
chi2Hdu = pyfits.ImageHDU(self.pullsq)
chi2Header = chi2Hdu.header
for key in outputHeaderDict:
imageHeader[key] = outputHeaderDict[key]
hduListOutput.append(chi2Hdu)
# Wavefront map
if self.paramDict["outputWavefront"]:
waveHdu = pyfits.ImageHDU(self.gFitFunc.getvPupilMask()*self.gFitFunc.getvPupilWaveZernike())
waveHeader = waveHdu.header
hduListOutput.append(waveHdu)
# Wavefront Grid map
gridHdu = pyfits.ImageHDU(self.gFitFunc.getvDeltaWFM())
gridHeader = gridHdu.header
hduListOutput.append(gridHdu)
# file names for output are
# outputPrefix + identifier
if identifier!="" :
outName = self.fitDict["outputPrefix"] + "." + identifier + "." + postfix
else:
outName = self.fitDict["outputPrefix"] + "." + postfix
# write out fits file
outFile = outName + ".donut.fits"
if self.paramDict["printLevel"]>=1:
hduListOutput.info()
hduListOutput.writeto(outFile,overwrite=True)
# add info from input Header for return
outputDict.update(outputHeaderDict)
resultDict = {}
resultDict.update(outputDict)
return resultDict
|
|
'''
Given a list of candidate public domain book titles and authors attempt to find the best matching
OpenLibrary records with full text available on Internet Archive.
'''
import codecs
import xml.etree.ElementTree as ET
import pymarc
from requests import ConnectionError
import requests_cache
import time
CACHE_DIR = '../cache/'
DATA_DIR = '../data/'
RATE = 2.0 # requests/second
count = 0
requests_cache.install_cache('openlibrary')
session = requests_cache.CachedSession()
def make_throttle_hook(timeout=1.0):
    """
    Build a requests response hook that throttles uncached responses.

    The returned callable sleeps for `timeout` seconds whenever the response
    did not come from the local cache, then hands the response back unchanged.
    """
    def hook(response, **kwargs):
        came_from_cache = getattr(response, 'from_cache', False)
        if not came_from_cache:
            time.sleep(timeout)
        return response
    return hook
session.hooks = {'response': make_throttle_hook(1.0/RATE)}
def search_open_library(author,title, language):
    # Query the OpenLibrary search API for full-text records matching the
    # author/title; returns the raw list of result docs ([] on failure).
    # NOTE(review): the `language` argument is currently unused -- the
    # 'language' key is commented out of the payload below.
    result = []
    author = author.strip()
    # keep at most "Surname, Forename" from longer author strings
    author = ','.join(author.split(',')[0:2])
    # drop a trailing period (e.g. left over from initials)
    if author and author[-1] == '.':
        author = author[0:-1]
    payload = {'has_fulltext' : 'true', 'title': title} #, 'language': language}
    if author:
        payload.update({'author': author})
    response = session.get('http://openlibrary.org/search.json',params=payload)
    if response.status_code == 200:
        docs = response.json()['docs']
        result = docs
    else:
        print 'Request failed',response.status_code,payload
    return result
def check_language(lang,doc):
    # Return True when the OpenLibrary search doc lists `lang` among its
    # languages; otherwise print a diagnostic and return False (including
    # when the doc carries no language information at all).
    if 'language' in doc:
        if lang in doc['language']:
            return True
        else:
            print 'Non-English version ',doc['key'],doc['language']
    else:
        # TODO:
        #aid = doc['ocaid']
        #marc_url = 'https://archive.org/download/%s/%s_marc.xml' % (aid,aid)
        # download MARC
        # check for 'English' in 240$l
        print 'Unknown language ',doc['key']
    return False
def all_editions(doc):
    '''
    Merge the contents of the two IA editions fields
    (one may be a superset of the other, but lets just be safe)
    '''
    editions = set()
    # 'ia_loaded_id' and 'ia' may each list IA identifiers; union them
    for k in ('ia_loaded_id','ia'):
        if k in doc:
            editions.update(doc[k])
    print len(editions), ' editions'
    return editions
def get_json(url):
    # Fetch `url` through the cached session and return the decoded JSON
    # body; returns None (with a diagnostic print) on any non-200 status.
    response = session.get(url)
    if response.status_code == 200:
        edition = response.json()
        return edition
    else:
        print 'Failed to get JSON for %s - status code %d' % (url,response.status_code)
def get_ia_edition(iaid):
    '''Get JSON for an edition using its IA identifier. Follows non-HTTP
    OpenLibrary redirect records.'''
    edition = get_json('http://openlibrary.org/books/ia:%s.json' % iaid)
    is_redirect = (bool(edition) and 'type' in edition and
                   'key' in edition['type'] and
                   edition['type']['key'] == '/type/redirect')
    if is_redirect:
        edition = get_json('http://openlibrary.org%s.json' % edition['location'])
    return edition
def get_file(iaid,suffix,body=False):
    '''
    Test whether a file in the given format is available for an Internet Archive ID.
    Follows redirects if necessary.
    If the "suffix" parameter doesn't contain a period, one will be prepended.
    This allows both "epub" and "_files.xml" style suffixes.
    If body=False (default), the content is not fetched: redirects are followed
    and the final URL is returned.  If body=True, the response object carrying
    the content is returned instead.  Returns None on failure (403s are silent).
    '''
    #files = session.get('http://archive.org/download/%s/%s_files.xml' % (iaid,iaid))
    if suffix.find('.') < 0:
        suffix = '.'+suffix
    url = 'http://archive.org/download/%s/%s%s' % (iaid,iaid,suffix)
    try:
        # HEAD is enough for an existence probe; GET only when content wanted
        if not body:
            epub = session.head(url)
        else:
            epub = session.get(url)
        if epub.status_code == 302:
            # one manual redirect hop to the actual download host
            url = epub.headers['location']
            #print 'Redirecting to ',url
            if not body:
                epub = session.head(url)
            else:
                epub = session.get(url)
        if epub.status_code == 200:
            return epub if body else url
        elif epub.status_code != 403:
            print 'HTTP error (%d) fetching %s for %s from %s ' % (epub.status_code,suffix,iaid,url)
    except ConnectionError as e:
        print 'Error fetching URL: ',url,e
    return None
def rate_wait(time):
    '''
    Generator-based rate limiter: each next() after the first returns only
    once at least `time` seconds have passed since the previous next().
    BUGFIX: the original referenced an undefined now() (NameError on first
    next()) and never slept; this is a working implementation of the intent.
    '''
    import time as _clock  # the parameter shadows the time module
    last = _clock.time()
    while True:
        yield
        elapsed = _clock.time() - last
        if elapsed < time:
            _clock.sleep(time - elapsed)
        last = _clock.time()
def merge(base, added):
    '''Append (in place) the records from `added` whose 'key' is not already
    present in `base`; returns `base`.'''
    seen = set()
    for rec in base:
        seen.add(rec['key'])
    for rec in added:
        if rec['key'] not in seen:
            base.append(rec)
    return base
def publicdomain(date):
    '''Return True when `date` parses as a year earlier than 1923
    (the US public-domain cutoff); False for empty or unparseable input.'''
    if not date:
        return False
    try:
        year = int(date)
    except ValueError:
        return False
    return year < 1923
def get_files(ia):
    '''
    Return the list of file names available for an Internet Archive item,
    using a locally cached copy of the item's _files.xml when present.
    Returns None if the listing could not be fetched.
    '''
    suffix = '_files.xml'
    filename = CACHE_DIR + ia + suffix
    root = None
    # check cache first; fall back to fetching (and caching) the listing
    try:
        # BUGFIX: use open() instead of the Python-2-only file() builtin
        with open(filename, 'r') as cachefile:
            root = ET.parse(cachefile).getroot()
    except IOError:
        files_xml = get_file(ia,suffix,body=True)
        if files_xml and files_xml.status_code == 200:
            with open(filename, 'w') as output:
                output.write(files_xml.content)
            root = ET.fromstring(files_xml.content)
    if root is not None:
        files = [f.get('name') for f in root.findall('file')]
        return files
def find_file(files, suffix):
    '''Return the first entry of `files` that ends with `suffix`, or None
    when there is no match or either argument is empty/None.'''
    if not files or not suffix:
        return None
    for name in files:
        if name.endswith(suffix):
            return name
    return None
def main():
with codecs.open(DATA_DIR+'SCCL-classics-ebook-candidates.tsv','w',encoding='utf-8') as output:
count = 0
for line in codecs.open(DATA_DIR+'SCCL-classics-edition-author-work.tsv', encoding='utf-8'):
count += 1
if count == 1:
continue # skip header line
title,author,work_title = line.rstrip('\n').split('\t')
title = title.split(':')[0].strip() # main title only
docs = search_open_library(author, title, 'eng')
print '\n%d OpenLibrary works found for %s by %s:' % (len(docs),title,author)
if work_title and work_title != title:
before = len(docs)
docs = merge(docs,search_open_library(author, title, 'eng'))
added = len(docs) - before
if added:
print 'Added %d new search results' % added
if not docs:
# Output a blank record so we know it got no matches
print 'No matches for %s by %s' % (title, author)
output.write('\t'.join([title, author])+'\n')
for doc in docs:
key = doc['key']
#if not 'public_scan_b' in doc or doc['public_scan_b']: # not reliable
if True or check_language('eng',doc):
nonIA = 0
for e in all_editions(doc):
edition = get_ia_edition(e)
if edition and 'ocaid' in edition:
ia = edition['ocaid']
key = edition['key']
date = edition['publish_date'] if 'publish_date' in edition else ''
if publicdomain(date):
#print date
files = get_files(ia)
# ePub is not listed typically
#epub_url = find_file(files,'epub')
abbyy_url = find_file(files,'_abbyy.gz')
marc_url = find_file(files,'_meta.mrc')
if abbyy_url and marc_url:
response = get_file(ia,'_meta.mrc',body=True)
if response and response.status_code == 200:
try:
marcfile = pymarc.MARCReader(response.content)
marc = marcfile.next()
field = marc['008'].value()
yr = field[7:11]
lang = field[35:38]
print yr, lang
ol_edition_url = 'http://openlibrary.org' + key
#print '\t'.join([ date, ol_edition_url, epub_url])
if lang == 'eng':
# We have a winner! - write it to our output file.
output.write('\t'.join([title, author, date, ol_edition_url])+'\n')
else:
print 'Skipping lang: ',lang
except:
print 'Failed to parse MARC record ',marc_url
else:
print 'No MARC record ',ia,key
else:
print 'Skipping ',ia,key,abbyy_url,marc_url
else:
nonIA += 1
print 'OL edition record unexpectedly missing "ocaid" key',edition
# print 'Editions with no IA equiv = %d' % nonIA
print count,title,author
# script entry point (note: runs unconditionally, no __main__ guard)
main()
|
|
from data import *
from utils.augmentations import SSDAugmentation
from layers.modules import MultiBoxLoss
from ssd import build_ssd
import os
import sys
import time
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.utils.data as data
import numpy as np
import argparse
def str2bool(v):
    """Interpret common affirmative strings ('yes', 'true', 't', '1'),
    case-insensitively, as True; anything else as False."""
    normalized = v.lower()
    return normalized in ("yes", "true", "t", "1")
# Command-line interface and global device setup for SSD training.
parser = argparse.ArgumentParser(
    description='Single Shot MultiBox Detector Training With Pytorch')
train_set = parser.add_mutually_exclusive_group()
parser.add_argument('--dataset', default='VOC', choices=['VOC', 'COCO'],
                    type=str, help='VOC or COCO')
parser.add_argument('--dataset_root', default=VOC_ROOT,
                    help='Dataset root directory path')
parser.add_argument('--basenet', default='vgg16_reducedfc.pth',
                    help='Pretrained base model')
parser.add_argument('--batch_size', default=32, type=int,
                    help='Batch size for training')
parser.add_argument('--resume', default=None, type=str,
                    help='Checkpoint state_dict file to resume training from')
parser.add_argument('--start_iter', default=0, type=int,
                    help='Resume training at this iter')
parser.add_argument('--num_workers', default=4, type=int,
                    help='Number of workers used in dataloading')
parser.add_argument('--cuda', default=True, type=str2bool,
                    help='Use CUDA to train model')
parser.add_argument('--lr', '--learning-rate', default=1e-3, type=float,
                    help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float,
                    help='Momentum value for optim')
parser.add_argument('--weight_decay', default=5e-4, type=float,
                    help='Weight decay for SGD')
parser.add_argument('--gamma', default=0.1, type=float,
                    help='Gamma update for SGD')
parser.add_argument('--visdom', default=False, type=str2bool,
                    help='Use visdom for loss visualization')
parser.add_argument('--save_folder', default='weights/',
                    help='Directory for saving checkpoint models')
args = parser.parse_args()
# pick the default tensor type up-front so all later allocations land on the
# requested device
if torch.cuda.is_available():
    if args.cuda:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    if not args.cuda:
        print("WARNING: It looks like you have a CUDA device, but aren't " +
              "using CUDA.\nRun with --cuda for optimal training speed.")
        torch.set_default_tensor_type('torch.FloatTensor')
else:
    torch.set_default_tensor_type('torch.FloatTensor')
# make sure the checkpoint directory exists
if not os.path.exists(args.save_folder):
    os.mkdir(args.save_folder)
def train():
    """Train SSD on the dataset selected by the module-level parsed args."""
    if args.dataset == 'COCO':
        if args.dataset_root == VOC_ROOT:
            if not os.path.exists(COCO_ROOT):
                parser.error('Must specify dataset_root if specifying dataset')
            print("WARNING: Using default COCO dataset_root because " +
                  "--dataset_root was not specified.")
            args.dataset_root = COCO_ROOT
        cfg = coco
        dataset = COCODetection(root=args.dataset_root,
                                transform=SSDAugmentation(cfg['min_dim'],
                                                          MEANS))
    elif args.dataset == 'VOC':
        if args.dataset_root == COCO_ROOT:
            parser.error('Must specify dataset if specifying dataset_root')
        cfg = voc
        dataset = VOCDetection(root=args.dataset_root,
                               transform=SSDAugmentation(cfg['min_dim'],
                                                          MEANS))
    if args.visdom:
        # BUGFIX: create_vis_plot/update_vis_plot read `viz` as a global,
        # so it must not be a local of train()
        global viz
        import visdom
        viz = visdom.Visdom()
    ssd_net = build_ssd('train', cfg['min_dim'], cfg['num_classes'])
    net = ssd_net
    if args.cuda:
        net = torch.nn.DataParallel(ssd_net)
        cudnn.benchmark = True
    if args.resume:
        print('Resuming training, loading {}...'.format(args.resume))
        ssd_net.load_weights(args.resume)
    else:
        vgg_weights = torch.load(args.save_folder + args.basenet)
        print('Loading base network...')
        ssd_net.vgg.load_state_dict(vgg_weights)
    if args.cuda:
        net = net.cuda()
    if not args.resume:
        print('Initializing weights...')
        # initialize newly added layers' weights with xavier method
        ssd_net.extras.apply(weights_init)
        ssd_net.loc.apply(weights_init)
        ssd_net.conf.apply(weights_init)
    optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum,
                          weight_decay=args.weight_decay)
    criterion = MultiBoxLoss(cfg['num_classes'], 0.5, True, 0, True, 3, 0.5,
                             False, args.cuda)
    net.train()
    # loss counters
    loc_loss = 0
    conf_loss = 0
    epoch = 0
    print('Loading the dataset...')
    epoch_size = len(dataset) // args.batch_size
    print('Training SSD on:', dataset.name)
    print('Using the specified args:')
    print(args)
    step_index = 0
    if args.visdom:
        vis_title = 'SSD.PyTorch on ' + dataset.name
        vis_legend = ['Loc Loss', 'Conf Loss', 'Total Loss']
        iter_plot = create_vis_plot('Iteration', 'Loss', vis_title, vis_legend)
        epoch_plot = create_vis_plot('Epoch', 'Loss', vis_title, vis_legend)
    data_loader = data.DataLoader(dataset, args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=True, collate_fn=detection_collate,
                                  pin_memory=True)
    # create batch iterator
    batch_iterator = iter(data_loader)
    for iteration in range(args.start_iter, cfg['max_iter']):
        if args.visdom and iteration != 0 and (iteration % epoch_size == 0):
            update_vis_plot(epoch, loc_loss, conf_loss, epoch_plot, None,
                            'append', epoch_size)
            # reset epoch loss counters
            loc_loss = 0
            conf_loss = 0
            epoch += 1
        if iteration in cfg['lr_steps']:
            step_index += 1
            adjust_learning_rate(optimizer, args.gamma, step_index)
        # load train data
        # BUGFIX: the DataLoader iterator is exhausted after one epoch --
        # restart it instead of crashing with StopIteration
        try:
            images, targets = next(batch_iterator)
        except StopIteration:
            batch_iterator = iter(data_loader)
            images, targets = next(batch_iterator)
        if args.cuda:
            images = Variable(images.cuda())
            targets = [Variable(ann.cuda(), volatile=True) for ann in targets]
        else:
            images = Variable(images)
            targets = [Variable(ann, volatile=True) for ann in targets]
        # forward
        t0 = time.time()
        out = net(images)
        # backprop
        optimizer.zero_grad()
        loss_l, loss_c = criterion(out, targets)
        loss = loss_l + loss_c
        loss.backward()
        optimizer.step()
        t1 = time.time()
        loc_loss += loss_l.data[0]
        conf_loss += loss_c.data[0]
        if iteration % 10 == 0:
            print('timer: %.4f sec.' % (t1 - t0))
            print('iter ' + repr(iteration) + ' || Loss: %.4f ||' % (loss.data[0]), end=' ')
        if args.visdom:
            update_vis_plot(iteration, loss_l.data[0], loss_c.data[0],
                            iter_plot, epoch_plot, 'append')
        if iteration != 0 and iteration % 5000 == 0:
            print('Saving state, iter:', iteration)
            # BUGFIX: honor --save_folder and the chosen dataset rather than
            # the hard-coded 'weights/ssd300_COCO_' path
            torch.save(ssd_net.state_dict(), args.save_folder + 'ssd300_' +
                       args.dataset + '_' + repr(iteration) + '.pth')
    torch.save(ssd_net.state_dict(),
               args.save_folder + '' + args.dataset + '.pth')
def adjust_learning_rate(optimizer, gamma, step):
    """Sets the learning rate to the initial LR decayed by 10 at every
    specified step

    # Adapted from PyTorch Imagenet example:
    # https://github.com/pytorch/examples/blob/master/imagenet/main.py
    """
    new_lr = args.lr * (gamma ** step)
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def xavier(param):
    # Xavier/Glorot init; init.xavier_uniform is the pre-0.4 PyTorch spelling
    init.xavier_uniform(param)
def weights_init(m):
    """Xavier-initialize conv weights and zero their biases (used via .apply)."""
    if not isinstance(m, nn.Conv2d):
        return
    xavier(m.weight.data)
    m.bias.data.zero_()
def create_vis_plot(_xlabel, _ylabel, _title, _legend):
    # Create an empty 3-series visdom line plot and return its window handle.
    # NOTE(review): expects a module-global `viz` (visdom connection) to
    # exist; train() currently creates it as a local -- verify before use.
    return viz.line(
        X=torch.zeros((1,)).cpu(),
        Y=torch.zeros((1, 3)).cpu(),
        opts=dict(
            xlabel=_xlabel,
            ylabel=_ylabel,
            title=_title,
            legend=_legend
        )
    )
def update_vis_plot(iteration, loc, conf, window1, window2, update_type,
                    epoch_size=1):
    # Append (loc, conf, loc+conf) losses to plot `window1`, dividing by
    # epoch_size when accumulated epoch losses are being plotted.
    # NOTE(review): expects a module-global `viz` (visdom connection) to
    # exist; train() currently creates it as a local -- verify before use.
    viz.line(
        X=torch.ones((1, 3)).cpu() * iteration,
        Y=torch.Tensor([loc, conf, loc + conf]).unsqueeze(0).cpu() / epoch_size,
        win=window1,
        update=update_type
    )
    # initialize epoch plot on first iteration
    if iteration == 0:
        viz.line(
            X=torch.zeros((1, 3)).cpu(),
            Y=torch.Tensor([loc, conf, loc + conf]).unsqueeze(0).cpu(),
            win=window2,
            update=True
        )
# standard script entry point
if __name__ == '__main__':
    train()
|
|
# -*- coding: utf-8 -*-
'''
Corpus DAO - ISF Corpus management functions
'''
# This code is a part of coolisf library: https://github.com/letuananh/intsem.fx
# :copyright: (c) 2014 Le Tuan Anh <tuananh.ke@gmail.com>
# :license: MIT, see LICENSE for more details.
# NOTE: This is used to be a parted of VisualKopasu, but relicensed
# & migrated into coolISF
import os
import os.path
import logging
from texttaglib.puchikarui import Schema, with_ctx
from texttaglib.chirptext import ttl
from coolisf.util import is_valid_name
from coolisf.model import Corpus, Document, Sentence, Reading
from coolisf.model import MRS, DMRS, DMRSLayout, Node, Sense, SortInfo, Link, Predicate
from coolisf.model import GpredValue, Lemma
from coolisf.model import Word, Concept, CWLink
# ----------------------------------------------------------------------
# Configuration
# ----------------------------------------------------------------------
logger = logging.getLogger(__name__)
MY_DIR = os.path.dirname(os.path.realpath(__file__))
INIT_SCRIPT = os.path.join(MY_DIR, 'scripts', 'init_corpus.sql')
class RichKopasu(Schema):
    """Database schema declaration for the ISF corpus SQLite database.

    Maps each table to its columns and the model class (proto) its rows
    hydrate into; aliases give the short attribute names used elsewhere
    (e.g. self.doc, self.node). The INIT_SCRIPT creates the tables.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.add_file(INIT_SCRIPT)
        # corpus > document > sentence > reading hierarchy
        self.add_table('corpus', ['ID', 'name', 'title'], proto=Corpus).set_id('ID')
        self.add_table('document', ['ID', 'name', 'corpusID', 'title',
                                    'grammar', 'tagger', 'parse_count', 'lang'],
                       proto=Document, alias='doc').set_id('ID')
        self.add_table('sentence', ['ID', 'ident', 'text', 'docID', 'flag', 'comment'],
                       proto=Sentence).set_id('ID')
        # DB column 'ident' maps onto Reading.rid
        self.add_table('reading', ['ID', 'ident', 'mode', 'sentID', 'comment'],
                       proto=Reading).set_id('ID').field_map(ident='rid')
        self.add_table('mrs', ['ID', 'ident', 'raw', 'readingID'],
                       proto=MRS).set_id('ID')
        self.add_table('dmrs', ['ID', 'ident', 'cfrom', 'cto', 'surface', 'raw', 'readingID'],
                       proto=DMRS).set_id('ID')
        # Node related tables
        self.add_table('dmrs_node', ['ID', 'nodeid', 'cfrom', 'cto', 'surface', 'base',
                                     'carg', 'dmrsID', 'rplemmaID', 'rppos', 'rpsense',
                                     'gpred_valueID', 'synsetid', 'synset_score'],
                       proto=Node, alias='node').set_id('ID')
        self.add_table('dmrs_node_sortinfo', ['ID', 'cvarsort', 'num', 'pers', 'gend', 'sf',
                                              'tense', 'mood', 'prontype', 'prog', 'perf',
                                              'ind', 'dmrs_nodeID'],
                       proto=SortInfo, alias='sortinfo').set_id('ID')
        self.add_table('dmrs_node_gpred_value', ['ID', 'value'],
                       proto=GpredValue, alias='gpval').set_id('ID')
        self.add_table('dmrs_node_realpred_lemma', ['ID', 'lemma'],
                       proto=Lemma, alias='rplemma').set_id('ID')
        # Link related tables
        self.add_table('dmrs_link', ['ID', 'fromNodeID', 'toNodeID', 'dmrsID', 'post', 'rargname'],
                       proto=Link, alias='link').set_id('ID').field_map(fromNodeID="from_nodeid", toNodeID="to_nodeid")
        # Human annotation related tables
        self.add_table('word', ['ID', 'sid', 'widx', 'word', 'lemma', 'pos', 'cfrom', 'cto', 'comment'],
                       proto=Word).set_id('ID')
        self.add_table('concept', ['ID', 'sid', 'cidx', 'clemma', 'tag', 'flag', 'comment'],
                       proto=Concept).set_id('ID')
        # concept-word many-to-many link table (no surrogate ID)
        self.add_table('cwl', ['cid', 'wid'],
                       proto=CWLink)
class CachedTable():
    '''
    ORM cache

    Caches table rows both by a chosen field value and by primary-key tuple.
    @auto_fill: Auto select all objects to cache when the cache is created
    '''
    def __init__(self, table, cache_by_field="value", ctx=None, auto_fill=True):
        # value -> instance and id-tuple -> instance maps
        self.cacheMap = {}
        self.cacheMapByID = {}
        self.table = table
        self.cache_by_field = cache_by_field
        if auto_fill:
            instances = self.table.select(ctx=ctx)
            if instances is not None:
                for instance in instances:
                    self.cache(instance)
    def cache(self, instance):
        # Register an instance under both its cache-field value and its ID
        # tuple; on collision the first entry wins and the dupe is only logged.
        if instance:
            key = getattr(instance, self.cache_by_field)
            if key not in self.cacheMap:
                self.cacheMap[key] = instance
            else:
                logger.debug(("Cache error: key [%s] exists!" % key))
            key = tuple(getattr(instance, c) for c in self.table.id_cols)
            if key not in self.cacheMapByID:
                self.cacheMapByID[key] = instance
            else:
                logger.debug(("Cache error: ID [%s] exists!" % key))
    def by_value(self, value, new_object=None, ctx=None):
        # Fetch (or lazily create and persist) the row whose cache field
        # equals `value`, caching the result.
        if value not in self.cacheMap:
            # insert a new record
            if new_object is None:
                # try to select from database first
                results = self.table.select_single("{f}=?".format(f=self.cache_by_field), (value,), ctx=ctx)
                if not results:
                    # create a new instance
                    new_object = self.table.to_obj((value,), (self.cache_by_field,))
                    self.table.save(new_object, ctx=ctx)
                    # select the instance again
                    new_object = self.table.select_single("{f}=?".format(f=self.cache_by_field), (value,), ctx=ctx)
                else:
                    new_object = results # Use the object from DB
            self.cache(new_object)
        return self.cacheMap[value]
    def by_id(self, *ID, ctx=None):
        # Fetch by primary-key tuple, consulting the DB on a cache miss.
        k = tuple(ID)
        if k not in self.cacheMapByID:
            # select from database
            obj = self.table.by_id(*ID, ctx=ctx)
            self.cache(obj)
        return self.cacheMapByID[k]
class CorpusDAOSQLite(RichKopasu):
    def __init__(self, data_source=":memory:", name='', auto_fill=False, *args, **kwargs):
        # SQLite-backed corpus DAO; caches the lemma and gpred-value lookup tables
        super().__init__(data_source, *args, **kwargs)
        self.name = name
        self.lemmaCache = CachedTable(self.rplemma, "lemma", auto_fill=auto_fill)
        self.gpredCache = CachedTable(self.gpval, "value", auto_fill=auto_fill)
    @property
    def db_path(self):
        # path of the underlying SQLite data source
        return self.ds.path
    @with_ctx
    def get_corpus(self, corpus_name, ctx=None):
        # corpus names are unique, so a single-row select suffices
        return ctx.corpus.select_single('name=?', (corpus_name,))
@with_ctx
def create_corpus(self, corpus_name, ctx=None):
if not is_valid_name(corpus_name):
raise Exception("Invalid corpus name (provided: {}) - Visko only accept names using alphanumeric characters".format(corpus_name))
corpus = Corpus(corpus_name)
corpus.ID = ctx.corpus.save(corpus)
return corpus
@with_ctx
def save_doc(self, doc, *fields, ctx=None):
if not is_valid_name(doc.name):
raise ValueError("Invalid doc name (provided: {}) - Visko only accept names using alphanumeric characters".format(doc.name))
else:
doc.ID = self.doc.save(doc, fields, ctx=ctx)
return doc
@with_ctx
def get_docs(self, corpusID, ctx=None):
corpus = ctx.corpus.by_id(corpusID)
docs = ctx.doc.select('corpusID=?', (corpus.ID,))
for doc in docs:
doc.corpus = corpus
q = "SELECT COUNT(*) FROM sentence WHERE docID = ?"
p = (doc.ID,)
doc.sent_count = ctx.select_scalar(q, p)
return docs
@with_ctx
def get_doc(self, doc_name, ctx=None):
# doc.name is unique
doc = ctx.doc.select_single('name=?', (doc_name,))
if doc is None:
return None
doc.corpus = ctx.corpus.by_id(doc.corpusID)
q = "SELECT COUNT(*) FROM sentence WHERE docID = ?"
p = (doc.ID,)
doc.sent_count = ctx.select_scalar(q, p)
return doc
@with_ctx
def get_sents(self, docID, flag=None, add_dummy_parses=True, page=None, pagesize=1000, ctx=None):
where = ['docID = ?']
params = [docID]
limit = None
if flag is not None:
where.append('flag = ?')
params.append(flag)
if page is not None:
offset = page * pagesize
limit = "{}, {}".format(offset, pagesize)
sents = ctx.sentence.select(' AND '.join(where), params, limit=limit)
if add_dummy_parses:
for sent in sents:
reading_count = ctx.select_scalar('SELECT COUNT(*) FROM reading WHERE sentid=?', (sent.ID,))
sent.readings = [None] * reading_count
return sents
@with_ctx
def note_sentence(self, sent_id, comment, ctx=None):
# save comments
return ctx.sentence.update((comment,), 'ID=?', (sent_id,), ['comment'])
@with_ctx
def read_note_sentence(self, sent_id, ctx=None):
return ctx.sentence.by_id(sent_id, columns=['comment']).comment
@with_ctx
def save_sent(self, a_sentence, ctx=None):
"""
Save sentence object (with all DMRSes, raws & shallow readings inside)
"""
# validations
if a_sentence is None:
raise ValueError("Sentence object cannot be None")
# ctx is not None now
if not a_sentence.ID:
# choose a new ident
if a_sentence.ident is None or a_sentence.ident in (-1, '-1', ''):
# create a new ident (it must be a string)
a_sentence.ident = str(ctx.select_scalar('SELECT IFNULL(max(rowid), 0)+1 FROM sentence'))
if a_sentence.ident is None:
a_sentence.ident = "1"
# save sentence
a_sentence.ID = ctx.sentence.save(a_sentence)
# save shallow
if a_sentence.shallow is not None:
self.save_annotations(a_sentence, ctx=ctx)
# save readings
for idx, reading in enumerate(a_sentence.readings):
if reading.rid is None:
reading.rid = idx
# Update sentID
reading.sentID = a_sentence.ID
self.save_reading(reading, ctx=ctx)
else:
# update sentence
pass
# Select sentence
return a_sentence
@with_ctx
def save_reading(self, reading, ctx=None):
# save or update reading info
reading.ID = ctx.reading.save(reading) if reading.ID is None else reading.ID
# Save DMRS
dmrs = reading.dmrs()
# store raw if needed
if dmrs.raw is None:
dmrs.raw = dmrs.xml_str(pretty_print=False)
dmrs.readingID = reading.ID
if dmrs.ident is None:
dmrs.ident = reading.rid
dmrs.ID = None # reset DMRS ID
dmrs.ID = ctx.dmrs.save(dmrs)
# nodes and links are in layout
# save nodes
for node in dmrs.layout.nodes:
node.dmrsID = dmrs.ID
# save realpred
node.pred = node.predstr
if node.rplemma:
# Escape lemma
lemma = self.lemmaCache.by_value(node.rplemma, ctx=ctx)
node.rplemmaID = lemma.ID
# save gpred
if node.gpred:
gpred_value = self.gpredCache.by_value(node.gpred, ctx=ctx)
node.gpred_valueID = gpred_value.ID
# save sense
if node.sense:
node.synsetid = node.sense.synsetid
node.synset_score = node.sense.score
elif node.nodeid in dmrs.tags:
tags = dmrs.tags[node.nodeid]
tag = tags[0]
for t in tags[1:]:
if t.method == ttl.Tag.GOLD:
tag = t
break
node.synsetid = tag.synset.ID.to_canonical()
node.synset_score = tag.synset.tagcount
# reset node ID
node.ID = None
node.ID = ctx.node.save(node)
# save sortinfo
node.sortinfo.dmrs_nodeID = node.ID
node.sortinfo.ID = None # reset sortinfo ID
ctx.sortinfo.save(node.sortinfo)
# save links
for link in dmrs.layout.links:
link.dmrsID = dmrs.ID
if link.rargname is None:
link.rargname = ''
link.ID = None # reset link ID
link.ID = ctx.link.save(link)
@with_ctx
def get_reading(self, a_reading, ctx=None):
# retrieve all DMRSes
# right now, only 1 DMRS per reading
a_dmrs = ctx.dmrs.select_single('readingID=?', (a_reading.ID,))
a_reading._dmrs = a_dmrs
a_dmrs.reading = a_reading
a_dmrs._layout = DMRSLayout(source=a_dmrs)
# retrieve all nodes
nodes = ctx.node.select('dmrsID=?', (a_dmrs.ID,))
for a_node in nodes:
# retrieve sortinfo
sortinfo = ctx.sortinfo.select_single('dmrs_nodeID=?', (a_node.ID,))
if sortinfo is not None:
a_node.sortinfo = sortinfo
if a_node.rplemmaID is not None:
# is a realpred
a_node.rplemma = self.lemmaCache.by_id(int(a_node.rplemmaID), ctx=ctx).lemma
a_node.pred = Predicate(Predicate.REALPRED, a_node.rplemma, a_node.rppos, a_node.rpsense)
if a_node.gpred_valueID:
# is a gpred
a_node.pred = self.gpredCache.by_id(int(a_node.gpred_valueID), ctx=ctx).value
# a_node.pred = Predicate.from_string(a_node.gpred)
# create sense object
if a_node.synsetid:
sense = Sense()
sense.synsetid = a_node.synsetid
sense.score = a_node.synset_score
sense.lemma = a_node.rplemma if a_node.rplemma else '' # this also?
sense.pos = a_node.synsetid[-1] # Do we really need this?
a_node.sense = sense
a_dmrs.tag_node(a_node.nodeid, sense.synsetid, sense.lemma, ttl.Tag.DEFAULT, sense.score)
a_dmrs.layout.add_node(a_node)
# next node ...
# retrieve all links
links = ctx.link.select('dmrsID=?', (a_dmrs.ID,))
for link in links:
a_dmrs.layout.add_link(link)
return a_reading
@with_ctx
def delete_reading(self, readingID, ctx=None):
# delete all DMRS link, node
ctx.dmrs_link.delete('dmrsID IN (SELECT ID FROM dmrs WHERE readingID=?)', (readingID,))
ctx.dmrs_node_sortinfo.delete('dmrs_nodeID IN (SELECT ID FROM dmrs_node WHERE dmrsID IN (SELECT ID from dmrs WHERE readingID=?))', (readingID,))
ctx.dmrs_node.delete('dmrsID IN (SELECT ID FROM dmrs WHERE readingID=?)', (readingID,))
# delete all DMRS
ctx.dmrs.delete("readingID=?", (readingID,))
# delete readings
ctx.reading.delete("ID=?", (readingID,))
@with_ctx
def update_reading(self, reading, ctx=None):
# delete all DMRS link, node
ctx.dmrs_link.delete('dmrsID IN (SELECT ID FROM dmrs WHERE readingID=?)', (reading.ID,))
ctx.dmrs_node_sortinfo.delete('dmrs_nodeID IN (SELECT ID FROM dmrs_node WHERE dmrsID IN (SELECT ID from dmrs WHERE readingID=?))', (reading.ID,))
ctx.dmrs_node.delete('dmrsID IN (SELECT ID FROM dmrs WHERE readingID=?)', (reading.ID,))
# delete all DMRS
ctx.dmrs.delete("readingID=?", (reading.ID,))
# update reading info
self.save_reading(reading, ctx=ctx)
@with_ctx
def build_search_result(self, rows, with_comment=True, ctx=None):
''' build search result from query results
Format: sentID, readingID, text, sentence_ident, docID, doc_name, corpus_name, corpusID
'''
if rows:
logger.debug(("Found: %s presentation(s)" % len(rows)))
else:
logger.debug("None was found!")
return []
sentences = []
sentences_by_id = {}
for row in rows:
readingID = row['readingID']
sentID = row['sentID']
sentence_ident = row['sentence_ident']
text = row['text']
docID = row['docID']
if sentID in sentences_by_id:
# sentence exists, add this reading to that sentence
a_reading = Reading(ID=readingID)
# self.get_reading(a_reading)
sentences_by_id[sentID].readings.append(a_reading)
else:
a_sentence = Sentence(ident=sentence_ident, text=text, docID=docID, ID=sentID)
a_sentence.corpus = Corpus(name=row['corpus_name'], ID=row['corpusID'])
a_sentence.doc = Document(name=row['doc_name'], ID=docID)
if readingID:
# add reading if needed
a_reading = Reading(ID=readingID)
a_sentence.readings.append(a_reading)
sentences.append(a_sentence)
sentences_by_id[sentID] = a_sentence
logger.debug(("Sentence count: %s" % len(sentences)))
if with_comment:
for sent in sentences:
sent.comment = self.read_note_sentence(sent_id=sent.ID, ctx=ctx)
return sentences
@with_ctx
def get_sent(self, sentID, mode=None, readingIDs=None, skip_details=False, ctx=None):
a_sentence = ctx.sentence.by_id(sentID)
if a_sentence is not None:
self.get_annotations(sentID, a_sentence, ctx=ctx)
# retrieve all readings
conditions = 'sentID=?'
params = [a_sentence.ID]
if mode:
conditions += ' AND mode=?'
params.append(mode)
if readingIDs and len(readingIDs) > 0:
conditions += ' AND ID IN ({params_holder})'.format(params_holder=",".join((["?"] * len(readingIDs))))
params.extend(readingIDs)
readings = ctx.reading.select(conditions, params)
for r in readings:
r.sent = a_sentence
a_sentence.readings.append(r)
for a_reading in a_sentence.readings:
if not skip_details:
self.get_reading(a_reading, ctx=ctx)
else:
logging.debug("No sentence with ID={} was found".format(sentID))
# Return
return a_sentence
@with_ctx
def delete_sent(self, sentID, ctx=None):
# delete all reading
sent = self.get_sent(sentID, skip_details=True, ctx=ctx)
# delete readings
if sent is not None:
for i in sent:
self.delete_reading(i.ID, ctx=ctx)
# delete words, concepts, cwl
ctx.word.delete('sid=?', (sentID,))
ctx.cwl.delete('cid IN (SELECT cid FROM concept WHERE sid=?)', (sentID,))
ctx.concept.delete('sid=?', (sentID,))
# delete sentence obj
ctx.sentence.delete("ID=?", (sentID,))
@with_ctx
def get_annotations(self, sentID, sent_obj=None, ctx=None):
if sent_obj is None:
sent_obj = self.get_sent(sentID, skip_details=True, ctx=ctx)
# select words
# select concepts
sent_obj.words = ctx.word.select("sid=?", (sentID,))
wmap = {w.ID: w for w in sent_obj.words}
sent_obj.concepts = ctx.concept.select("sid=?", (sentID,))
cmap = {c.ID: c for c in sent_obj.concepts}
# link concept-word
links = ctx.cwl.select("cid IN (SELECT ID from concept WHERE sid=?)", (sentID,))
for lnk in links:
cmap[lnk.cid].words.append(wmap[lnk.wid])
return sent_obj
@with_ctx
def save_annotations(self, sent_obj, ctx=None):
for word in sent_obj.words:
word.sid = sent_obj.ID
word.ID = ctx.word.save(word)
for concept in sent_obj.concepts:
concept.sid = sent_obj.ID
concept.ID = ctx.concept.save(concept)
for word in concept.words:
# save links
logger.debug("Saving", CWLink(wid=word.ID, cid=concept.ID))
ctx.cwl.save(CWLink(wid=word.ID, cid=concept.ID))
pass
@with_ctx
def flag_sent(self, sid, flag, ctx=None):
# update flag
return ctx.sentence.update(new_values=(flag,), where='ID=?', where_values=(sid,), columns=('flag',))
@with_ctx
def next_sentid(self, sid, flag=None, ctx=None):
sent_obj = ctx.sentence.by_id(sid, columns=('ID', 'docID'))
docid = sent_obj.docID
where = 'ID > ? AND docID == ?'
params = [sid, docid]
if flag is not None:
where += " AND flag = ?"
params.append(flag)
next_sent = ctx.sentence.select_single(where=where, values=params, orderby="docID, ID", limit=1)
return next_sent.ID if next_sent is not None else None
@with_ctx
def prev_sentid(self, sid, flag=None, ctx=None):
sent_obj = ctx.sentence.by_id(sid, columns=('ID', 'docID'))
docid = sent_obj.docID
where = 'ID < ? AND docID == ?'
params = [sid, docid]
if flag is not None:
where += " AND flag = ?"
params.append(flag)
prev_sent = ctx.sentence.select_single(where=where, values=params, orderby="docID DESC, ID DESC", limit=1)
return prev_sent.ID if prev_sent is not None else None
|
|
#!/usr/bin/python
"""Make a set of virtual machines and use them to run tests;
specifically this script runs Selenium automated tests of a website;
specifically this script should allow load testing, which so far
hasn't been something Selenium has been used for.
As implemented uses virtual machine snapshots.
Example use of NrvrCommander.
Idea and first implementation - Leo Baschy <srguiwiz12 AT nrvr DOT com>
Public repository - https://github.com/srguiwiz/nrvr-commander
Copyright (c) Nirvana Research 2006-2015.
Simplified BSD License"""
from collections import namedtuple
import ntpath
import os.path
import posixpath
import random
import re
import shutil
import string
import sys
import tempfile
import time
from nrvr.diskimage.isoimage import IsoImage, IsoImageModificationFromString, IsoImageModificationFromPath
from nrvr.distros.common.ssh import LinuxSshCommand
from nrvr.distros.common.util import LinuxUtil
from nrvr.distros.el.gnome import ElGnome
from nrvr.distros.el.kickstart import ElIsoImage, ElKickstartFileContent
from nrvr.distros.el.kickstarttemplates import ElKickstartTemplates
from nrvr.distros.el.util import ElUtil
from nrvr.distros.ub.util import UbUtil
from nrvr.distros.ub.rel1204.gnome import Ub1204Gnome
from nrvr.distros.ub.rel1204.kickstart import Ub1204IsoImage, UbKickstartFileContent
from nrvr.distros.ub.rel1204.kickstarttemplates import UbKickstartTemplates
from nrvr.distros.ub.rel1404.gnome import Ub1404Gnome
from nrvr.distros.ub.rel1404.preseed import Ub1404IsoImage, UbPreseedFileContent
from nrvr.distros.ub.rel1404.preseedtemplates import UbPreseedTemplates
from nrvr.machine.ports import PortsFile
from nrvr.process.commandcapture import CommandCapture
from nrvr.remote.ssh import SshCommand, ScpCommand
from nrvr.util.download import Download
from nrvr.util.ipaddress import IPAddress
from nrvr.util.nameserver import Nameserver
from nrvr.util.registering import RegisteringUser
from nrvr.util.requirements import SystemRequirements
from nrvr.util.times import Timestamp
from nrvr.util.user import ScriptUser
from nrvr.vm.vmware import VmdkFile, VmxFile, VMwareHypervisor, VMwareMachine
from nrvr.vm.vmwaretemplates import VMwareTemplates
from nrvr.wins.common.autounattend import WinUdfImage
from nrvr.wins.common.cygwin import CygwinDownload
from nrvr.wins.common.javaw import JavawDownload
from nrvr.wins.common.ssh import CygwinSshCommand
from nrvr.wins.win7.autounattend import Win7UdfImage, Win7AutounattendFileContent
from nrvr.wins.win7.autounattendtemplates import Win7AutounattendTemplates
# this is a good way to preflight check:
# verify the external commands needed by these implementations are installed
SystemRequirements.commandsRequiredByImplementations([IsoImage, WinUdfImage,
                                                      VmdkFile, VMwareHypervisor,
                                                      SshCommand, ScpCommand,
                                                      CygwinDownload, JavawDownload],
                                                     verbose=True)
# this is a good way to preflight check:
# a local VMware hypervisor with snapshot support is required before doing any work
VMwareHypervisor.localRequired()
VMwareHypervisor.snapshotsRequired()
# --- download URLs for OS installers, browsers, drivers and tools ---
# from https://www.scientificlinux.org/download/
scientificLinuxDistro32IsoUrl = "http://ftp.scientificlinux.org/linux/scientific/6.5/i386/iso/SL-65-i386-2013-12-16-Install-DVD.iso"
scientificLinuxDistro64IsoUrl = "http://ftp.scientificlinux.org/linux/scientific/6.5/x86_64/iso/SL-65-x86_64-2014-01-27-Install-DVD.iso"
# from http://isoredirect.centos.org/centos/6/isos/
centOSDistro32IsoUrl = "http://mirrors.usc.edu/pub/linux/distributions/centos/6.6/isos/i386/CentOS-6.6-i386-bin-DVD1.iso"
centOSDistro64IsoUrl = "http://mirrors.usc.edu/pub/linux/distributions/centos/6.6/isos/x86_64/CentOS-6.6-x86_64-bin-DVD1.iso"
# from http://releases.ubuntu.com/
# several packages installed OK until Ubuntu 12.04.4, but apparently not in Ubuntu 12.04.5
ubuntu1204Distro32IsoUrl = "http://releases.ubuntu.com/12.04.4/ubuntu-12.04.4-alternate-i386.iso"
ubuntu1204Distro64IsoUrl = "http://releases.ubuntu.com/12.04.4/ubuntu-12.04.4-alternate-amd64.iso"
# from http://releases.ubuntu.com/
ubuntu1404Distro32IsoUrl = "http://releases.ubuntu.com/14.04.2/ubuntu-14.04.2-desktop-i386.iso"
ubuntu1404Distro64IsoUrl = "http://releases.ubuntu.com/14.04.2/ubuntu-14.04.2-desktop-amd64.iso"
# from http://social.technet.microsoft.com/Forums/windows/en-US/653d34d9-ac99-42db-80c8-6300f01f7aae/windows-7downloard
# or http://forums.mydigitallife.info/threads/14709-Windows-7-Digital-River-direct-links-Multiple-Languages-X86-amp-X64/page60
windows7ProInstaller32EnIsoUrl = "http://msft.digitalrivercontent.net/win/X17-59183.iso"
windows7ProInstaller64EnIsoUrl = "http://msft.digitalrivercontent.net/win/X17-59186.iso"
# from http://code.google.com/p/selenium/downloads/list
seleniumServerStandaloneJarUrl = "http://selenium-release.storage.googleapis.com/2.44/selenium-server-standalone-2.44.0.jar"
# from https://pypi.python.org/pypi/selenium/
seleniumPythonBindingsTarUrl = "https://pypi.python.org/packages/source/s/selenium/selenium-2.44.0.tar.gz"
# from https://pypi.python.org/pypi/setuptools
pythonSetuptoolsTarUrl = "https://pypi.python.org/packages/source/s/setuptools/setuptools-7.0.tar.gz"
#
googleChromeUbuntu32InstallerUrl = "https://dl.google.com/linux/direct/google-chrome-stable_current_i386.deb"
googleChromeUbuntu64InstallerUrl = "https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb"
# from http://chromedriver.storage.googleapis.com/index.html
chromeDriverLinux32InstallerZipUrl = "http://chromedriver.storage.googleapis.com/2.9/chromedriver_linux32.zip"
chromeDriverLinux64InstallerZipUrl = "http://chromedriver.storage.googleapis.com/2.9/chromedriver_linux64.zip"
chromeDriverWindowsInstallerZipUrl = "http://chromedriver.storage.googleapis.com/2.9/chromedriver_win32.zip"
# from http://code.google.com/p/selenium/downloads/list
seleniumIeDriverServer32ZipUrl = "http://selenium.googlecode.com/files/IEDriverServer_Win32_2.39.0.zip"
seleniumIeDriverServer64ZipUrl = "http://selenium.googlecode.com/files/IEDriverServer_x64_2.39.0.zip"
# from http://www.python.org/download/
python2xWindows32InstallerMsiUrl = "http://www.python.org/ftp/python/2.7.6/python-2.7.6.msi"
python2xWindows64InstallerMsiUrl = "http://www.python.org/ftp/python/2.7.6/python-2.7.6.amd64.msi"
# from http://nodejs.org/download/
nodejsSourceTarUrl = "http://nodejs.org/dist/v0.12.2/node-v0.12.2.tar.gz"
# --- test-run configuration ---
# used to be specific to the website you are testing,
# now probably just the files inside testsDirectory will change
testsInvokerScript = "tests-invoker.py"
testsDirectory = "tests"
# VM numbers; will modulo over machinesPattern,
# customize as needed
testVmsRange = range(181, 184) #191) # or more
# root password for every test VM; customize as needed
rootpw = "redwood"
# regular (non-root) accounts created in every test VM; customize as needed
# normally at least one
testUsers = [RegisteringUser(username="tester", pwd="testing"),
             RegisteringUser(username="tester2", pwd="testing")
             ]
# per-VM configuration: OS flavor, architecture, browser under test, locale,
# memory size (MB) and number of processor cores
MachineParameters = namedtuple("MachineParameters", ["distro", "arch", "browser", "lang", "memsize", "cores"])

class Arch(str):
    """Architecture tag stored as a string (e.g. Arch(32) == "32").

    make sure it is a string to avoid string-number unequality
    """
    pass

# customize as needed; entries cycle per VM via index % len(machinesPattern)
# sl - Scientific Linux
# cent - CentOS
# ub1204 - Ubuntu 12.04 LTS
# ub1404 - Ubuntu 14.04 LTS
# win - Windows
machinesPattern = [#MachineParameters(distro="sl", arch=Arch(32), browser="firefox", lang="en_US.UTF-8", memsize=900, cores=1),
                   MachineParameters(distro="cent", arch=Arch(32), browser="firefox", lang="en_US.UTF-8", memsize=900, cores=1),
                   #MachineParameters(distro="ub1204", arch=Arch(32), browser="chrome", lang="en_US.UTF-8", memsize=960, cores=1),
                   MachineParameters(distro="ub1404", arch=Arch(32), browser="chrome", lang="en_US.UTF-8", memsize=960, cores=1),
                   #MachineParameters(distro="sl", arch=Arch(32), browser="firefox", lang="de_DE.UTF-8", memsize=920, cores=1),
                   #MachineParameters(distro="ub1204", arch=Arch(32), browser="chrome", lang="de_DE.UTF-8", memsize=980, cores=1),
                   #MachineParameters(distro="ub1404", arch=Arch(32), browser="chrome", lang="de_DE.UTF-8", memsize=980, cores=1),
                   #MachineParameters(distro="sl", arch=Arch(32), browser="firefox", lang="zh_CN.UTF-8", memsize=1000, cores=1),
                   #MachineParameters(distro="ub1204", arch=Arch(32), browser="chrome", lang="zh_CN.UTF-8", memsize=1060, cores=1),
                   #MachineParameters(distro="ub1404", arch=Arch(32), browser="chrome", lang="zh_CN.UTF-8", memsize=1060, cores=1),
                   #MachineParameters(distro="sl", arch=Arch(64), browser="firefox", lang="en_US.UTF-8", memsize=1400, cores=2),
                   #MachineParameters(distro="cent", arch=Arch(64), browser="firefox", lang="en_US.UTF-8", memsize=1400, cores=2),
                   #MachineParameters(distro="ub1204", arch=Arch(64), browser="chrome", lang="en_US.UTF-8", memsize=1460, cores=2),
                   #MachineParameters(distro="ub1404", arch=Arch(64), browser="chrome", lang="en_US.UTF-8", memsize=1460, cores=2),
                   MachineParameters(distro="win", arch=Arch(32), browser="iexplorer", lang="en-US", memsize=1020, cores=1),
                   #MachineParameters(distro="win", arch=Arch(64), browser="iexplorer", lang="en-US", memsize=1520, cores=2),
                   ]
# trying to approximate the order in which identifiers are used from this tuple
VmIdentifiers = namedtuple("VmIdentifiers", ["vmxFilePath", "name", "number", "ipaddress", "mapas"])
# customize as needed
def vmIdentifiersForNumber(number, index):
    """Make various identifiers for a virtual machine.

    number
        an int probably best between 2 and 254.
    index
        position in testVmsRange; selects a machinesPattern entry modulo
        the pattern length.
    Return a VmIdentifiers instance."""
    # identifiers are derived in this order:
    # ip address (hostonly on eth1) -> VM name -> .vmx path -> machine pattern
    ip = IPAddress.numberWithinSubnet(VMwareHypervisor.localHostOnlyIPAddress, number)
    vmName = IPAddress.nameWithNumber("testvm", ip, separator=None)
    vmxPath = ScriptUser.loggedIn.userHomeRelative("vmware/testvms/%s/%s.vmx" % (vmName, vmName))
    pattern = machinesPattern[index % len(machinesPattern)]
    return VmIdentifiers(vmxFilePath=vmxPath,
                         name=vmName,
                         number=number,
                         ipaddress=ip,
                         mapas=pattern)
# one VmIdentifiers per VM number, cycling through machinesPattern by index
testVmsIdentifiers = [vmIdentifiersForNumber(number, index)
                      for index, number in enumerate(testVmsRange)]
# this example use of NrvrCommander makes use of features provided of different components
# of NrvrCommander,
# it may be important to understand that other uses (use patterns) are possible and reasonable
#
# specifically this example use has first been written for and maybe with better understanding
# of Linux operating systems,
# therefore this example use probably has (shows) a different, maybe better, style of
# separation between root and additional (regular) users in Linux than in Windows
#
# it should be possible to spend some time on making it do things differently in Windows,
# not saying this is bad, just saying different styles are possible
def makeTestVmWithGui(vmIdentifiers, forceThisStep=False):
"""Make a single virtual machine.
Enterprise Linux or Ubuntu.
vmIdentifiers
a VmIdentifiers instance.
Return a VMwareMachine instance."""
testVm = VMwareMachine(vmIdentifiers.vmxFilePath)
distro = vmIdentifiers.mapas.distro
arch = vmIdentifiers.mapas.arch
browser = vmIdentifiers.mapas.browser
#
if not distro in ["sl", "cent", "ub1204", "ub1404", "win"]:
raise Exception("unknown distro %s" % (distro))
if not arch in [Arch(32), Arch(64)]:
raise Exception("unknown architecture arch=%s" % (arch))
if not browser in ["firefox", "chrome", "iexplorer"]:
raise Exception("unknown distro %s" % (distro))
if distro in ["sl", "cent"] and browser == "chrome":
raise Exception("cannot run browser %s in distro %s" % (browser, distro))
if distro not in ["win"] and browser == "iexplorer":
raise Exception("cannot run browser %s in distro %s" % (browser, distro))
#
if distro in ["sl", "cent"]:
additionalUsers = testUsers
regularUser = testUsers[0]
elif distro in ["ub1204", "ub1404"]:
# Ubuntu kickstart and preseed support only one regular user
regularUser = testUsers[0]
elif distro == "win":
additionalUsers = testUsers
regularUser = testUsers[0]
#
if forceThisStep:
if VMwareHypervisor.local.isRunning(testVm.vmxFilePath):
testVm.shutdownCommand(ignoreException=True)
VMwareHypervisor.local.sleepUntilNotRunning(testVm.vmxFilePath, ticker=True)
testVm.remove()
#
vmExists = testVm.vmxFile.exists()
if vmExists == False:
# make virtual machine
testVm.mkdir()
#
if distro in ["sl", "cent"]:
if distro == "sl":
if arch == Arch(32):
downloadedDistroIsoImage = ElIsoImage(Download.fromUrl(scientificLinuxDistro32IsoUrl))
elif arch == Arch(64):
downloadedDistroIsoImage = ElIsoImage(Download.fromUrl(scientificLinuxDistro64IsoUrl))
elif distro == "cent":
if arch == Arch(32):
downloadedDistroIsoImage = ElIsoImage(Download.fromUrl(centOSDistro32IsoUrl))
elif arch == Arch(64):
downloadedDistroIsoImage = ElIsoImage(Download.fromUrl(centOSDistro64IsoUrl))
kickstartFileContent = ElKickstartFileContent(ElKickstartTemplates.usableElKickstartTemplate001)
kickstartFileContent.replaceLang(vmIdentifiers.mapas.lang)
kickstartFileContent.replaceRootpw(rootpw)
kickstartFileContent.elReplaceHostname(testVm.basenameStem)
#kickstartFileContent.elReplaceStaticIP(vmIdentifiers.ipaddress, nameservers=Nameserver.list)
# put in DHCP at eth0, to be used with NAT, works well if before hostonly
kickstartFileContent.elReplaceStaticIP(vmIdentifiers.ipaddress, nameservers=[])
kickstartFileContent.elAddNetworkConfigurationWithDhcp("eth0")
if distro == "sl":
kickstartFileContent.replaceAllPackages(ElKickstartTemplates.packagesOfSL64Desktop)
elif distro == "cent":
kickstartFileContent.replaceAllPackages(ElKickstartTemplates.packagesOfCentOS65Desktop)
kickstartFileContent.removePackage("@office-suite") # not used for now
kickstartFileContent.addPackage("python-setuptools") # needed for installing Python packages
kickstartFileContent.addPackage("gcc") # needed for installing Node.js from a specific version .tar
kickstartFileContent.addPackage("gcc-c++") # needed for installing Node.js from a specific version .tar
kickstartFileContent.elActivateGraphicalLogin()
for additionalUser in additionalUsers:
kickstartFileContent.elAddUser(additionalUser.username, pwd=additionalUser.pwd)
kickstartFileContent.setSwappiness(10)
# pick right temporary directory, ideally same as VM
modifiedDistroIsoImage = downloadedDistroIsoImage.cloneWithAutoBootingKickstart \
(kickstartFileContent,
cloneIsoImagePath=os.path.join(testVm.directory, "made-to-order-os-install.iso"))
# some necessary choices pointed out
# 32-bit versus 64-bit Linux, memsizeMegabytes needs to be more for 64-bit
if arch == Arch(32):
guestOS = "centos"
elif arch == Arch(64):
guestOS = "centos-64"
testVm.create(memsizeMegabytes=vmIdentifiers.mapas.memsize,
guestOS=guestOS,
ideDrives=[40000, 300, modifiedDistroIsoImage])
testVm.vmxFile.setNumberOfProcessorCores(vmIdentifiers.mapas.cores)
testVm.portsFile.setSsh(ipaddress=vmIdentifiers.ipaddress, user="root", pwd=rootpw)
testVm.portsFile.setShutdown()
for additionalUser in additionalUsers:
testVm.portsFile.setSsh(ipaddress=vmIdentifiers.ipaddress,
user=additionalUser.username,
pwd=additionalUser.pwd)
testVm.portsFile.setRegularUser(regularUser.username)
# NAT works well if before hostonly
testVm.vmxFile.setEthernetAdapter(0, "nat")
testVm.vmxFile.setEthernetAdapter(1, "hostonly")
# start up for operating system install
VMwareHypervisor.local.start(testVm.vmxFilePath, gui=True, extraSleepSeconds=0)
VMwareHypervisor.local.sleepUntilNotRunning(testVm.vmxFilePath, ticker=True)
testVm.vmxFile.removeAllIdeCdromImages()
modifiedDistroIsoImage.remove()
#
# start up for accepting known host key
VMwareHypervisor.local.start(testVm.vmxFilePath, gui=True, extraSleepSeconds=0)
testVm.sleepUntilHasAcceptedKnownHostKey(ticker=True)
#
# a test machine needs to come up ready to run tests, no manual login
testVm.sshCommand([ElGnome.elCommandToEnableAutoLogin(regularUser.username)])
testVm.sshCommand([ElGnome.elCommandToDisableScreenSaver()], user=regularUser.username)
# avoid distracting backgrounds, picks unique color to be clear this is a test machine
testVm.sshCommand([ElGnome.elCommandToSetSolidColorBackground("#dddd66")], user=regularUser.username)
testVm.sshCommand([ElGnome.elCommandToDisableUpdateNotifications()], user=regularUser.username)
#
# might as well
testVm.sshCommand([ElUtil.commandToEnableSudo(regularUser.username)])
#
# shut down
testVm.shutdownCommand()
VMwareHypervisor.local.sleepUntilNotRunning(testVm.vmxFilePath, ticker=True)
# start up until successful login into GUI
VMwareHypervisor.local.start(testVm.vmxFilePath, gui=True, extraSleepSeconds=0)
userSshParameters = testVm.sshParameters(user=regularUser.username)
LinuxSshCommand.sleepUntilIsGuiAvailable(userSshParameters, ticker=True)
#
testVm.sshCommand([ElGnome.elCommandToAddSystemMonitorPanel()], user=regularUser.username)
#
# shut down for snapshot
testVm.shutdownCommand()
VMwareHypervisor.local.sleepUntilNotRunning(testVm.vmxFilePath, ticker=True)
elif distro in ["ub1204", "ub1404"]:
if distro == "ub1204":
if arch == Arch(32):
downloadedDistroIsoImage = Ub1204IsoImage(Download.fromUrl(ubuntu1204Distro32IsoUrl))
elif arch == Arch(64):
downloadedDistroIsoImage = Ub1204IsoImage(Download.fromUrl(ubuntu1204Distro64IsoUrl))
elif distro == "ub1404":
if arch == Arch(32):
downloadedDistroIsoImage = Ub1404IsoImage(Download.fromUrl(ubuntu1404Distro32IsoUrl))
elif arch == Arch(64):
downloadedDistroIsoImage = Ub1404IsoImage(Download.fromUrl(ubuntu1404Distro64IsoUrl))
if distro == "ub1204":
kickstartFileContent = UbKickstartFileContent(UbKickstartTemplates.usableUbKickstartTemplate001)
kickstartFileContent.replaceLang(vmIdentifiers.mapas.lang)
kickstartFileContent.replaceRootpw(rootpw)
kickstartFileContent.ubReplaceHostname(testVm.basenameStem)
kickstartFileContent.ubCreateNetworkConfigurationSection()
#kickstartFileContent.ubAddNetworkConfigurationStatic(device="eth0", ipaddress=vmIdentifiers.ipaddress, nameservers=Nameserver.list)
# put in DHCP at eth0, to be used with NAT, works well if before hostonly
kickstartFileContent.ubAddNetworkConfigurationDhcp("eth0")
kickstartFileContent.ubAddNetworkConfigurationStatic(device="eth1",
ipaddress=vmIdentifiers.ipaddress,
nameservers=Nameserver.list)
kickstartFileContent.ubSetUpgradeNone()
kickstartFileContent.ubSetUpdatePolicyNone()
kickstartFileContent.replaceAllPackages(UbKickstartTemplates.packagesForUbuntuDesktop)
# default-jre installed OK until Ubuntu 12.04.4, but apparently not in Ubuntu 12.04.5
kickstartFileContent.addPackage("default-jre") # Java needed for Selenium Server standalone .jar
kickstartFileContent.addPackage("python-setuptools") # needed for installing Python packages
kickstartFileContent.addPackage("libxss1") # needed for Google Chrome
kickstartFileContent.addPackage("libappindicator1") # needed for Google Chrome
kickstartFileContent.ubActivateGraphicalLogin()
kickstartFileContent.ubSetUser(regularUser.username, pwd=regularUser.pwd, fullname=regularUser.fullname)
kickstartFileContent.setSwappiness(10)
# pick right temporary directory, ideally same as VM
modifiedDistroIsoImage = downloadedDistroIsoImage.cloneWithAutoBootingKickstart \
(kickstartFileContent,
cloneIsoImagePath=os.path.join(testVm.directory, "made-to-order-os-install.iso"))
elif distro == "ub1404":
preseedFileContent = UbPreseedFileContent(UbPreseedTemplates.usableUbWithGuiPreseedTemplate001)
preseedFileContent.replaceLang(vmIdentifiers.mapas.lang)
preseedFileContent.replaceRootpw(rootpw)
preseedFileContent.replaceHostname(testVm.basenameStem)
#preseedFileContent.addNetworkConfigurationStatic(device="eth0", ipaddress=vmIdentifiers.ipaddress, nameservers=Nameserver.list)
# put in DHCP at eth0, to be used with NAT, works well if before hostonly
# in Ubuntu 14.04 LTS routing works only if not configuring that eth0 interface which will use DHCP
#preseedFileContent.addNetworkConfigurationDhcp("eth0")
preseedFileContent.addNetworkConfigurationStatic(device="eth1",
ipaddress=vmIdentifiers.ipaddress,
nameservers=Nameserver.list)
preseedFileContent.setUpgradeNone()
preseedFileContent.setUpdatePolicyNone()
preseedFileContent.addPackage("default-jre") # Java needed for Selenium Server standalone .jar
preseedFileContent.addPackage("python-setuptools") # needed for installing Python packages
preseedFileContent.addPackage("libappindicator1") # needed for Google Chrome
preseedFileContent.addPackage("gnome-panel") # per http://ubuntuforums.org/showthread.php?t=2140745
preseedFileContent.setUser(regularUser.username, pwd=regularUser.pwd, fullname=regularUser.fullname)
preseedFileContent.setSwappiness(10)
# pick right temporary directory, ideally same as VM
modifiedDistroIsoImage = downloadedDistroIsoImage.cloneWithAutoBootingPreseed \
(preseedFileContent,
UbPreseedTemplates.usableUbWithGuiPreseedFirstTimeStartScript001,
cloneIsoImagePath=os.path.join(testVm.directory, "made-to-order-os-install.iso"))
# some necessary choices pointed out
# 32-bit versus 64-bit Linux, memsizeMegabytes needs to be more for 64-bit
if arch == Arch(32):
guestOS = "ubuntu"
elif arch == Arch(64):
guestOS = "ubuntu-64"
testVm.create(memsizeMegabytes=vmIdentifiers.mapas.memsize,
guestOS=guestOS,
ideDrives=[40000, 300, modifiedDistroIsoImage])
testVm.vmxFile.setNumberOfProcessorCores(vmIdentifiers.mapas.cores)
# a possible choice pointed out
#testVm.vmxFile.setAccelerate3D()
testVm.portsFile.setSsh(ipaddress=vmIdentifiers.ipaddress, user="root", pwd=rootpw)
testVm.portsFile.setShutdown()
testVm.portsFile.setSsh(ipaddress=vmIdentifiers.ipaddress,
user=regularUser.username,
pwd=regularUser.pwd)
testVm.portsFile.setRegularUser(regularUser.username)
# NAT works well if before hostonly
testVm.vmxFile.setEthernetAdapter(0, "nat")
testVm.vmxFile.setEthernetAdapter(1, "hostonly")
# start up for operating system install
VMwareHypervisor.local.start(testVm.vmxFilePath, gui=True, extraSleepSeconds=0)
VMwareHypervisor.local.sleepUntilNotRunning(testVm.vmxFilePath, ticker=True)
testVm.vmxFile.removeAllIdeCdromImages()
modifiedDistroIsoImage.remove()
#
# start up for accepting known host key
VMwareHypervisor.local.start(testVm.vmxFilePath, gui=True, extraSleepSeconds=0)
# allow more time, hope this will fix problem which made grub not proceed with default
testVm.sleepUntilHasAcceptedKnownHostKey(ticker=True, extraSleepSeconds=15)
#
# a test machine needs to come up ready to run tests, no manual login
testVm.sshCommand([UbUtil.ubCommandToEnableAutoLogin(regularUser.username)])
# might as well
testVm.sshCommand([UbUtil.ubCommandToDisableGuestLogin()])
#
# might as well
testVm.sshCommand([UbUtil.commandToEnableSudo(regularUser.username)])
#
# shut down
# allow more time, hope this will fix problem which made grub not proceed with default
testVm.shutdownCommand(firstSleepSeconds=10)
VMwareHypervisor.local.sleepUntilNotRunning(testVm.vmxFilePath, ticker=True)
# start up
VMwareHypervisor.local.start(testVm.vmxFilePath, gui=True, extraSleepSeconds=0)
userSshParameters = testVm.sshParameters(user=regularUser.username)
#
if distro == "ub1204":
# until successful login into GUI
LinuxSshCommand.sleepUntilIsGuiAvailable(userSshParameters, ticker=True)
#
testVm.sshCommand([Ub1204Gnome.ubCommandToDisableScreenSaver()], user=regularUser.username)
# avoid distracting backgrounds, picks unique color to be clear this is a test machine
testVm.sshCommand([Ub1204Gnome.ubCommandToSetSolidColorBackground("#dddd66")], user=regularUser.username)
# indicator-multiload installed OK until Ubuntu 12.04.4, but apparently not in Ubuntu 12.04.5
testVm.sshCommand([Ub1204Gnome.ubCommandToInstallSystemMonitorPanel()])
testVm.sshCommand([Ub1204Gnome.ubCommandToAddSystemMonitorPanel()], user=regularUser.username)
elif distro == "ub1404":
# until successful login into GUI
# allow more time, hope this will allow the gsettings commands to work and stick for good
LinuxSshCommand.sleepUntilIsGuiAvailable(userSshParameters, ticker=True, extraSleepSeconds=30)
#
testVm.sshCommand([Ub1404Gnome.ubCommandToDisableScreenSaver()], user=regularUser.username)
# avoid distracting backgrounds, picks unique color to be clear this is a test machine
testVm.sshCommand([Ub1404Gnome.ubCommandToSetSolidColorBackground("#dddd66")], user=regularUser.username)
testVm.sshCommand([Ub1404Gnome.ubCommandToInstallSystemMonitorPanel()])
testVm.sshCommand([Ub1404Gnome.ubCommandToAddSystemMonitorPanel()], user=regularUser.username)
#
# try avoiding excessive GPU use in virtual machines
testVm.sshCommand([Ub1404Gnome.ubCommandToLimitCompizGpuUse()], user=regularUser.username)
#
# shut down for snapshot
testVm.shutdownCommand()
VMwareHypervisor.local.sleepUntilNotRunning(testVm.vmxFilePath, ticker=True)
elif distro == "win":
if arch == Arch(32):
downloadedDistroIsoImage = Win7UdfImage(Download.fromUrl(windows7ProInstaller32EnIsoUrl))
elif arch == Arch(64):
downloadedDistroIsoImage = Win7UdfImage(Download.fromUrl(windows7ProInstaller64EnIsoUrl))
# some necessary choices pointed out
# 32-bit versus 64-bit windows, memsizeMegabytes needs to be more for 64-bit
if arch == Arch(32):
guestOS = "windows7"
elif arch == Arch(64):
guestOS = "windows7-64"
testVm.create(memsizeMegabytes=vmIdentifiers.mapas.memsize,
guestOS=guestOS,
ideDrives=[40000]) #, modifiedDistroIsoImage])
testVm.vmxFile.setNumberOfProcessorCores(vmIdentifiers.mapas.cores)
# a possible choice pointed out
#testVm.vmxFile.setAccelerate3D()
cygServerRandomPwd = ''.join(random.choice(string.letters) for i in xrange(20))
# were considering doing ssh-host-config --yes --pwd $( openssl rand -hex 16 )
# and intentionally not knowing how to log in as user cyg_server;
# but even knowing pwd apparently we cannot log in via ssh, hence for now we do not
#testVm.portsFile.setSsh(ipaddress=vmIdentifiers.ipaddress, user="cyg_server", pwd=cygServerRandomPwd)
# hence we do not
#testVm.portsFile.setShutdown(command="shutdown -h now", user="cyg_server")
# instead do something that works
#
# important to know difference in Cygwin between PATH when logged in via ssh interactively,
# in which case Cygwin directories such as /usr/local/bin and /usr/bin come first,
# versus sending a command via ssh command line,
# in which case it is Windows system directories only,
# also see http://cygwin.com/ml/cygwin/2005-05/msg00012.html ,
# which you can verify by remotely viewing
# ssh -l tester 10.123.45.67 echo \$PATH
# hence this shutdown command invokes the Windows shutdown command
testVm.portsFile.setShutdown(command="shutdown -s -t 20", user=regularUser.username)
for additionalUser in additionalUsers:
testVm.portsFile.setSsh(ipaddress=vmIdentifiers.ipaddress,
user=additionalUser.username,
pwd=additionalUser.pwd)
testVm.portsFile.setRegularUser(regularUser.username)
# NAT works well if before hostonly
testVm.vmxFile.setEthernetAdapter(0, "nat")
testVm.vmxFile.setEthernetAdapter(1, "hostonly")
# generated MAC addresses are available only after first start of a virtual machine
VMwareHypervisor.local.startAndStopWithIdeDrivesDisabled(testVm.vmxFilePath, gui=True)
ethernetAdapter0MacAddress = testVm.vmxFile.getEthernetMacAddress(0)
ethernetAdapter1MacAddress = testVm.vmxFile.getEthernetMacAddress(1)
# autounattend file content
autounattendFileContent = Win7AutounattendFileContent(Win7AutounattendTemplates.usableWin7AutounattendTemplate001)
autounattendFileContent.replaceLanguageAndLocale(vmIdentifiers.mapas.lang)
autounattendFileContent.replaceAdminPw(rootpw)
autounattendFileContent.replaceComputerName(testVm.basenameStem)
# a network interface with static configuration
autounattendFileContent.addNetworkConfigurationStatic(mac=ethernetAdapter1MacAddress,
ipaddress=vmIdentifiers.ipaddress,
limitRoutingToLocalByNetmask=True)
# simplified use of acceptEula
autounattendFileContent.acceptEula(fullname=regularUser.fullname, organization=regularUser.organization)
for additionalUser in additionalUsers:
autounattendFileContent.addLocalAccount(username=additionalUser.username,
pwd=additionalUser.pwd,
fullname=additionalUser.fullname,
groups=["Administrators"])
autounattendFileContent.enableAutoLogon(regularUser.username, regularUser.pwd)
# additional modifications
modifications = []
customDirectoryPathOnIso = "custom"
#
# shutdown only while installer disk is present
shutdownRandomScriptName = ''.join(random.choice(string.letters) for i in xrange(8))
shutdownScriptPathOnIso = os.path.join(customDirectoryPathOnIso, shutdownRandomScriptName + ".bat")
modifications.extend([
# an intentionally transient shutdown script
IsoImageModificationFromString
(shutdownScriptPathOnIso,
r'shutdown -s -t 20 -c "Running shutdown script ' + shutdownScriptPathOnIso +
r' intended as part of installation process."'),
])
shutdownScriptPathForCommandLine = shutdownScriptPathOnIso.replace("/", "\\")
shutdownScriptInvocationCommandLine = \
ntpath.join("D:\\", shutdownScriptPathForCommandLine)
autounattendFileContent.addLogonCommand(order=490,
commandLine=shutdownScriptInvocationCommandLine,
description="Shutdown - intentionally transient")
#
# install Cygwin 32-bit even in Windows 64-bit
# see http://stackoverflow.com/questions/18329233/is-it-advisable-to-switch-from-cygwin-32bit-to-cygwin-64bit
cygwinArch = Arch(32)
# locally downloaded Cygwin packages directory
# see http://www.cygwin.com/install.html
# see http://www.cygwin.com/faq/faq.html#faq.setup.cli
cygwinPackagesPathOnHost = CygwinDownload.forArch(cygwinArch, CygwinDownload.usablePackageDirs001)
cygwinPackagesPathOnIso = os.path.join(customDirectoryPathOnIso, os.path.basename(cygwinPackagesPathOnHost))
cygwinPackagesPathForCommandLine = cygwinPackagesPathOnIso.replace("/", "\\")
# run Cygwin installer, intentionally only while installer disk is present
cygwinInstallRandomScriptName = ''.join(random.choice(string.letters) for i in xrange(7))
cygwinInstallScriptPathOnIso = os.path.join(customDirectoryPathOnIso, cygwinInstallRandomScriptName + ".bat")
# Cygwin installer
cygwinInstallCommandLine = \
ntpath.join("D:\\", cygwinPackagesPathForCommandLine, CygwinDownload.installerName(cygwinArch)) + \
r" --local-install" + \
r" --local-package-dir " + ntpath.join("D:\\", cygwinPackagesPathForCommandLine) + \
r" --root C:\cygwin" + \
r" --quiet-mode" + \
r" --no-desktop" + \
r" --packages " + CygwinDownload.usablePackageList001
# ssh-host-config
cygwinSshdConfigCommandLine = \
r"C:\cygwin\bin\bash --login -c " '"' + \
r"ssh-host-config --yes --pwd " + cygServerRandomPwd + \
'"'
# in /etc/sshd_config set MaxAuthTries 2, minimum to get prompted, less than default 6
cygwinSshdFixUpConfigCommandLine = \
r"C:\cygwin\bin\bash --login -c " '"' + \
r"( sed -i -e 's/.*MaxAuthTries\s.*/MaxAuthTries 2/g' /etc/sshd_config )" + \
'"'
# allow incoming ssh
openFirewallForSshdCommandLine = \
r"C:\cygwin\bin\bash --login -c " '"' + \
r"if ! netsh advfirewall firewall show rule name=SSHD ; then " + \
r"netsh advfirewall firewall add rule name=SSHD dir=in action=allow protocol=tcp localport=22" + \
r" ; fi" + \
'"'
# start service
startSshdCommandLine = \
"net start sshd"
modifications.extend([
# the Cygwin packages
IsoImageModificationFromPath(cygwinPackagesPathOnIso, cygwinPackagesPathOnHost),
# an intentionally transient install script;
# also pre-makes /etc/setup directory to prevent subtle setup defects,
# those defects being caused by not writing files which will be needed to rebase;
# also rebaseall in those defective circumstances could not help against:
# sshd child_info_fork::abort cygwrap-0.dll Loaded to different address,
# and that command line must use ash, not bash,
# not doing it for now but would be
# r"C:\cygwin\bin\ash -c " '"' r"/bin/rebaseall" + '"'
IsoImageModificationFromString
(cygwinInstallScriptPathOnIso,
#"mkdir C:\\" + cygwinPackagesPathForCommandLine + "\n" + \
#"xcopy D:\\" + cygwinPackagesPathForCommandLine + " C:\\" + cygwinPackagesPathForCommandLine + " /S /E" + "\n" + \
"mkdir C:\\cygwin\\etc\\setup\n" + \
cygwinInstallCommandLine + "\n" + \
cygwinSshdConfigCommandLine + "\n" + \
cygwinSshdFixUpConfigCommandLine + "\n" + \
openFirewallForSshdCommandLine + "\n" + \
startSshdCommandLine),
])
cygwinInstallScriptPathForCommandLine = cygwinInstallScriptPathOnIso.replace("/", "\\")
cygwinInstallScriptInvocationCommandLine = \
ntpath.join("D:\\", cygwinInstallScriptPathForCommandLine)
autounattendFileContent.addFirstLogonCommand(order=400,
commandLine=cygwinInstallScriptInvocationCommandLine,
description="Install Cygwin - intentionally transient")
#
# a detached instance of screen (per logged in user) to be able to start GUI programs from ssh,
# basic idea from http://superuser.com/questions/531787/starting-windows-gui-program-in-windows-through-cygwin-sshd-from-ssh-client
# and then needed more exploration and experimentation;
# also to expand variable out of registry before command line gets it,
# first must get % into registry value,
# see http://stackoverflow.com/questions/3620388/how-to-use-reg-expand-sz-from-the-commandline
runDetachedScreenRegistryValueCommandLine = \
r"""cmd.exe /c reg.exe add HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\Run """ + \
r"""/v CygwinScreen /t REG_EXPAND_SZ """ + \
r"""/d "C:\cygwin\bin\bash.exe --login -c 'screen -wipe ; screen -d -m -S wguifor_"^%USERNAME^%"'" """ + \
r"""/f"""
autounattendFileContent.addFirstLogonCommand(order=401,
commandLine=runDetachedScreenRegistryValueCommandLine,
description="Add registry value for running detached screen")
#
# various
disableIExplorerFirstRunWizardCommandLine = \
r"""reg.exe add "HKEY_LOCAL_MACHINE\SOFTWARE\Policies\Microsoft\Internet Explorer\Main" """ + \
r"""/v DisableFirstRunCustomize /t REG_DWORD /d 1 /f"""
autounattendFileContent.addFirstLogonCommand(order=410,
commandLine=disableIExplorerFirstRunWizardCommandLine,
description="Disable Internet Explorer first run wizard")
#
if arch == Arch(32):
autounattendFileContent.adjustFor32Bit()
elif arch == Arch(64):
autounattendFileContent.adjustFor64Bit()
# pick right temporary directory, ideally same as VM
modifiedDistroIsoImage = downloadedDistroIsoImage.cloneWithAutounattend \
(autounattendFileContent,
modifications=modifications,
cloneIsoImagePath=os.path.join(testVm.directory, "made-to-order-os-install.iso"))
# set CD-ROM .iso file, which had been kept out intentionally for first start for generating MAC addresses
testVm.vmxFile.setIdeCdromIsoFile(modifiedDistroIsoImage.isoImagePath, 1, 0)
# start up for operating system install
VMwareHypervisor.local.start(testVm.vmxFilePath, gui=True, extraSleepSeconds=0)
VMwareHypervisor.local.sleepUntilNotRunning(testVm.vmxFilePath, ticker=True)
testVm.vmxFile.removeAllIdeCdromImages()
modifiedDistroIsoImage.remove()
#
# start up for accepting known host key
VMwareHypervisor.local.start(testVm.vmxFilePath, gui=True, extraSleepSeconds=0)
testVm.sleepUntilHasAcceptedKnownHostKey(ticker=True)
#
# shut down for snapshot
testVm.shutdownCommand()
VMwareHypervisor.local.sleepUntilNotRunning(testVm.vmxFilePath, ticker=True)
#
VMwareHypervisor.local.createSnapshot(testVm.vmxFilePath, "OS installed")
#
return testVm
def installToolsIntoTestVm(vmIdentifiers, forceThisStep=False):
    """Install the testing tool chain into an existing VM and snapshot it.

    Starting from the "OS installed" snapshot, installs Java, Python,
    Node.js, the browser under test, Selenium Server plus its driver and
    Python bindings, as appropriate for the VM's distro/arch/browser
    combination, then shuts the VM down and creates a "tools installed"
    snapshot.  Skipped entirely when that snapshot already exists, unless
    forceThisStep is true.

    vmIdentifiers -- per-VM descriptor; this code reads .vmxFilePath and
                     .mapas (distro, arch, browser)
    forceThisStep -- redo this step even if "tools installed" exists

    Returns the VMwareMachine instance for the VM (added for consistency
    with makeTestVmWithGui, whose result callers use).
    """
    testVm = VMwareMachine(vmIdentifiers.vmxFilePath)
    distro = vmIdentifiers.mapas.distro
    arch = vmIdentifiers.mapas.arch
    browser = vmIdentifiers.mapas.browser
    #
    # on Windows there is no separate root account here; the regular user
    # (an Administrator) plays that role
    if distro in ["sl", "cent", "ub1204", "ub1404"]:
        rootOrAnAdministrator = "root"
    elif distro == "win":
        rootOrAnAdministrator = testVm.regularUser
    #
    snapshots = VMwareHypervisor.local.listSnapshots(vmIdentifiers.vmxFilePath)
    snapshotExists = "tools installed" in snapshots
    if not snapshotExists or forceThisStep:
        if VMwareHypervisor.local.isRunning(testVm.vmxFilePath):
            testVm.shutdownCommand(ignoreException=True)
            VMwareHypervisor.local.sleepUntilNotRunning(testVm.vmxFilePath, ticker=True)
        VMwareHypervisor.local.revertToSnapshotAndDeleteDescendants(vmIdentifiers.vmxFilePath, "OS installed")
        #
        # start up until successful login into GUI
        VMwareHypervisor.local.start(testVm.vmxFilePath, gui=True, extraSleepSeconds=0)
        userSshParameters = testVm.sshParameters(user=testVm.regularUser)
        if distro in ["sl", "cent", "ub1204", "ub1404"]:
            LinuxSshCommand.sleepUntilIsGuiAvailable(userSshParameters, ticker=True)
        elif distro == "win":
            CygwinSshCommand.sleepUntilIsGuiAvailable(userSshParameters, ticker=True)
        #
        # a necessity on some international version OS
        testVm.sshCommand(["mkdir -p ~/Downloads"], user=testVm.regularUser)
        if distro == "win":
            testVm.sshCommand(['mkdir -p "$( cygpath -u "$USERPROFILE/Downloads" )"'], user=testVm.regularUser)
            echo = testVm.sshCommand(['echo "$( cygpath -u "$USERPROFILE/Downloads" )"'], user=testVm.regularUser)
            windowsUserDownloadDirCygwinPath = echo.output.strip()
            echo = testVm.sshCommand([r"cmd.exe /C 'echo %USERPROFILE%\Downloads'"], user=testVm.regularUser)
            windowsUserDownloadDirWindowsPath = echo.output.strip()
        # for symmetry and comprehensibility
        testVm.sshCommand(["mkdir -p ~/Downloads"], user=rootOrAnAdministrator)
        #
        # install Java
        if distro == "win":
            # Java for Windows
            # install Java 32-bit even in Windows 64-bit
            # see http://www.java.com/en/download/faq/java_win64bit.xml
            javawInstallerOnHostPath = JavawDownload.now()
            javawInstallerBasename = os.path.basename(javawInstallerOnHostPath)
            javawInstallerOnGuestCygwinPath = posixpath.join(windowsUserDownloadDirCygwinPath, javawInstallerBasename)
            javawInstallerOnGuestWindowsPath = ntpath.join(windowsUserDownloadDirWindowsPath, javawInstallerBasename)
            testVm.scpPutCommand(fromHostPath=javawInstallerOnHostPath,
                                 toGuestPath=javawInstallerOnGuestCygwinPath,
                                 guestUser=testVm.regularUser)
            # run installer
            testVm.sshCommand(["chmod +x " + javawInstallerOnGuestCygwinPath],
                              user=testVm.regularUser)
            # see http://java.com/en/download/help/silent_install.xml
            # also, tolerate like Error opening file C:\Users\tester\AppData\LocalLow\Sun\Java\jre1.7.0_45\Java3BillDevices.jpg
            # also, tolerate Error: 2
            # also, work around Java for Windows installer program despite success not exiting if invoked this way
            testVm.sshCommand(["( nohup cmd.exe /C '" + javawInstallerOnGuestWindowsPath
                               + " /s /L " + javawInstallerOnGuestWindowsPath + ".log' &> /dev/null & )"],
                              user=testVm.regularUser,
                              exceptionIfNotZero=False)
            # poll until "java -version" succeeds, which signals the
            # backgrounded installer has finished
            waitingForJavawInstallerSuccess = True
            while waitingForJavawInstallerSuccess:
                time.sleep(5.0)
                javaVersion = testVm.sshCommand(
                    ["java -version"],
                    user=testVm.regularUser,
                    exceptionIfNotZero=False)
                if not javaVersion.returncode:
                    waitingForJavawInstallerSuccess = False
            # suppress scheduled check for Java updates
            testVm.sshCommand(
                [r"cmd.exe /C "
                 + r"""reg.exe delete 'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\Run'"""
                 + r""" /v SunJavaUpdateSched /f"""],
                user=rootOrAnAdministrator,
                exceptionIfNotZero=True)
        #
        #
        # install Python
        if distro == "win":
            # Python for Windows
            if arch == Arch(32):
                python2xWindowsInstallerMsiUrl = python2xWindows32InstallerMsiUrl
            elif arch == Arch(64):
                python2xWindowsInstallerMsiUrl = python2xWindows64InstallerMsiUrl
            pythonInstallerOnHostPath = Download.fromUrl(python2xWindowsInstallerMsiUrl)
            pythonInstallerBasename = os.path.basename(pythonInstallerOnHostPath)
            pythonInstallerOnGuestCygwinPath = posixpath.join(windowsUserDownloadDirCygwinPath, pythonInstallerBasename)
            pythonInstallerOnGuestWindowsPath = ntpath.join(windowsUserDownloadDirWindowsPath, pythonInstallerBasename)
            testVm.scpPutCommand(fromHostPath=pythonInstallerOnHostPath,
                                 toGuestPath=pythonInstallerOnGuestCygwinPath,
                                 guestUser=testVm.regularUser)
            # run installer
            # see http://www.python.org/download/releases/2.4/msi/
            testVm.sshCommand(["cmd.exe /C 'msiexec.exe /i " + pythonInstallerOnGuestWindowsPath
                               + " ALLUSERS=1 /qb! /log " + pythonInstallerOnGuestWindowsPath + ".log'"],
                              user=testVm.regularUser)
            # add to PATH for system
            # Cygwin regtool syntax see http://cygwin.com/cygwin-ug-net/using-utils.html
            machineWidePathRegistryKeyValue = "/HKEY_LOCAL_MACHINE/SYSTEM/CurrentControlSet/Control/Session Manager/Environment/Path"
            # assuming python.exe is in C:\Python27 or so
            testVm.sshCommand(['PYDIR="$( cygpath -w "$( echo /cygdrive/c/Py* )" )"'
                               + ' && '
                               + 'regtool --wow64 --expand-string set "' + machineWidePathRegistryKeyValue
                               + '" "$( regtool --wow64 get "' + machineWidePathRegistryKeyValue + '" );$PYDIR"'],
                              user=testVm.regularUser)
            # must restart for change of PATH to be effective
            # shut down
            testVm.shutdownCommand()
            VMwareHypervisor.local.sleepUntilNotRunning(testVm.vmxFilePath, ticker=True)
            # start up until successful login into GUI
            VMwareHypervisor.local.start(testVm.vmxFilePath, gui=True, extraSleepSeconds=0)
            CygwinSshCommand.sleepUntilIsGuiAvailable(userSshParameters, ticker=True)
        #
        # install Node.js
        if distro in ["sl", "cent", "ub1204", "ub1404"]:
            nodejsSourceTarOnHostPath = Download.fromUrl(nodejsSourceTarUrl)
            nodejsSourceTarBasename = Download.basename(nodejsSourceTarUrl)
            nodejsSourceTarOnGuestPath = posixpath.join("~/Downloads", nodejsSourceTarBasename)
            testVm.scpPutCommand(fromHostPath=nodejsSourceTarOnHostPath,
                                 toGuestPath=nodejsSourceTarOnGuestPath,
                                 guestUser=rootOrAnAdministrator)
            nodejsSourcesExtracted = re.match(r"^(\S+)(?:\.tar\.gz)$", nodejsSourceTarBasename).group(1)
            # build from source inside the guest
            testVm.sshCommand(["cd ~/Downloads"
                               + " && tar -xf " + nodejsSourceTarOnGuestPath
                               + " && cd " + nodejsSourcesExtracted + "/"
                               + " && ./configure && make && make install"],
                              user=rootOrAnAdministrator)
        #
        # install Google Chrome
        if browser == "chrome" and distro in ["ub1204", "ub1404"]:
            if arch == Arch(32):
                googleChromeUbuntuInstallerUrl = googleChromeUbuntu32InstallerUrl
            elif arch == Arch(64):
                googleChromeUbuntuInstallerUrl = googleChromeUbuntu64InstallerUrl
            chromeInstallerOnHostPath = Download.fromUrl(googleChromeUbuntuInstallerUrl)
            chromeInstallerOnGuestPath = posixpath.join("~/Downloads", Download.basename(googleChromeUbuntuInstallerUrl))
            testVm.scpPutCommand(fromHostPath=chromeInstallerOnHostPath,
                                 toGuestPath=chromeInstallerOnGuestPath,
                                 guestUser=rootOrAnAdministrator)
            # install
            testVm.sshCommand(["cd ~/Downloads"
                               + " && dpkg -i " + chromeInstallerOnGuestPath],
                              user=rootOrAnAdministrator)
            # run once, wait, terminate
            testVm.sshCommand(["export DISPLAY=:0.0 ; "
                               + "( nohup"
                               + " google-chrome --cancel-first-run --no-default-browser-check about:blank"
                               + " &> /dev/null & )"
                               + " && sleep 5"
                               + " && kill $( pidof chrome )"],
                              user=testVm.regularUser)
        #
        # install Selenium Server standalone
        # default-jre installed OK until Ubuntu 12.04.4, but apparently not in Ubuntu 12.04.5
        seleniumServerStandaloneJarPath = Download.fromUrl(seleniumServerStandaloneJarUrl)
        testVm.scpPutCommand(fromHostPath=seleniumServerStandaloneJarPath,
                             toGuestPath="~/Downloads/" + Download.basename(seleniumServerStandaloneJarUrl),
                             guestUser=testVm.regularUser)
        #
        if browser == "chrome":
            # install ChromeDriver
            # see http://code.google.com/p/selenium/wiki/ChromeDriver
            if arch == Arch(32):
                chromeDriverLinuxInstallerZipUrl = chromeDriverLinux32InstallerZipUrl
            elif arch == Arch(64):
                chromeDriverLinuxInstallerZipUrl = chromeDriverLinux64InstallerZipUrl
            chromeDriverInstallerZipOnHostPath = Download.fromUrl(chromeDriverLinuxInstallerZipUrl)
            chromeDriverInstallerZipBasename = Download.basename(chromeDriverLinuxInstallerZipUrl)
            chromeDriverInstallerZipOnGuestPath = posixpath.join("~/Downloads", chromeDriverInstallerZipBasename)
            testVm.scpPutCommand(fromHostPath=chromeDriverInstallerZipOnHostPath,
                                 toGuestPath=chromeDriverInstallerZipOnGuestPath,
                                 guestUser=rootOrAnAdministrator)
            chromeDriverInstallerExtracted = re.match(r"^(\S+)(?:\.zip)$", chromeDriverInstallerZipBasename).group(1)
            chromeDriverInstallerExtractedPath = posixpath.join("~/Downloads", chromeDriverInstallerExtracted)
            # unzip and copy to where it is on PATH
            testVm.sshCommand(["cd ~/Downloads"
                               + " && unzip -o " + chromeDriverInstallerZipOnGuestPath + " -d " + chromeDriverInstallerExtractedPath
                               + " && chmod +x " + chromeDriverInstallerExtractedPath + "/chromedriver"
                               + " && cp " + chromeDriverInstallerExtractedPath + "/chromedriver /usr/local/bin"],
                              user=rootOrAnAdministrator)
        #
        if browser == "iexplorer":
            # install IeDriver
            # see http://code.google.com/p/selenium/wiki/InternetExplorerDriver
            if arch == Arch(32):
                seleniumIeDriverServerZipUrl = seleniumIeDriverServer32ZipUrl
            elif arch == Arch(64):
                seleniumIeDriverServerZipUrl = seleniumIeDriverServer64ZipUrl
            seleniumIeDriverServerZipOnHostPath = Download.fromUrl(seleniumIeDriverServerZipUrl)
            seleniumIeDriverServerZipBasename = Download.basename(seleniumIeDriverServerZipUrl)
            seleniumIeDriverServerZipOnGuestPath = posixpath.join("~/Downloads", seleniumIeDriverServerZipBasename)
            testVm.scpPutCommand(fromHostPath=seleniumIeDriverServerZipOnHostPath,
                                 toGuestPath=seleniumIeDriverServerZipOnGuestPath,
                                 guestUser=rootOrAnAdministrator)
            seleniumIeDriverServerExtracted = re.match(r"^(\S+)(?:\.zip)$", seleniumIeDriverServerZipBasename).group(1)
            seleniumIeDriverServerExtractedPath = posixpath.join("~/Downloads", seleniumIeDriverServerExtracted)
            # unzip and copy to where it is on PATH, e.g. SYSTEMROOT could be /cygdrive/c/Windows
            testVm.sshCommand(["cd ~/Downloads"
                               + " && unzip -o " + seleniumIeDriverServerZipOnGuestPath + " -d " + seleniumIeDriverServerExtractedPath
                               + " && chmod +x " + seleniumIeDriverServerExtractedPath + "/IEDriverServer.exe"
                               + " && cp " + seleniumIeDriverServerExtractedPath + "/IEDriverServer.exe $SYSTEMROOT"],
                              user=rootOrAnAdministrator)
            # prevent firewall dialog on screen, regarding IeDriver
            testVm.sshCommand(
                [r'if ! netsh advfirewall firewall show rule name=IEDriverServer ; then '
                 + r'netsh advfirewall firewall add rule program="$SYSTEMROOT\IEDriverServer.exe" name=IEDriverServer dir=in action=allow protocol=tcp localport=any'
                 + r' ; fi'],
                user=rootOrAnAdministrator)
        #
        # install Python bindings
        if distro == "win":
            # first an auxiliary tool
            pythonSetuptoolsTarOnHostPath = Download.fromUrl(pythonSetuptoolsTarUrl)
            pythonSetuptoolsTarBasename = Download.basename(pythonSetuptoolsTarUrl)
            pythonSetuptoolsTarOnGuestPath = posixpath.join("~/Downloads", pythonSetuptoolsTarBasename)
            testVm.scpPutCommand(fromHostPath=pythonSetuptoolsTarOnHostPath,
                                 toGuestPath=pythonSetuptoolsTarOnGuestPath,
                                 guestUser=rootOrAnAdministrator)
            pythonSetuptoolsExtracted = re.match(r"^(\S+)(?:\.tar\.gz)$", pythonSetuptoolsTarBasename).group(1)
            testVm.sshCommand(["cd ~/Downloads"
                               + " && tar -xf " + pythonSetuptoolsTarOnGuestPath
                               + " && cd " + pythonSetuptoolsExtracted + "/"
                               + " && chmod +x setup.py"
                               + " && python ./setup.py install"],
                              user=rootOrAnAdministrator)
            seleniumPythonBindingsTarOnHostPath = Download.fromUrl(seleniumPythonBindingsTarUrl)
            seleniumPythonBindingsTarBasename = Download.basename(seleniumPythonBindingsTarUrl)
            seleniumPythonBindingsTarOnGuestPath = posixpath.join("~/Downloads", seleniumPythonBindingsTarBasename)
            testVm.scpPutCommand(fromHostPath=seleniumPythonBindingsTarOnHostPath,
                                 toGuestPath=seleniumPythonBindingsTarOnGuestPath,
                                 guestUser=rootOrAnAdministrator)
            seleniumPythonBindingsExtracted = re.match(r"^(\S+)(?:\.tar\.gz)$", seleniumPythonBindingsTarBasename).group(1)
            testVm.sshCommand(["cd ~/Downloads"
                               + " && tar -xf " + seleniumPythonBindingsTarOnGuestPath
                               + " && cd " + seleniumPythonBindingsExtracted + "/"
                               + " && chmod +x setup.py"
                               + " && python ./setup.py install"],
                              user=rootOrAnAdministrator)
        #
        # shut down for snapshot
        testVm.shutdownCommand()
        VMwareHypervisor.local.sleepUntilNotRunning(testVm.vmxFilePath, ticker=True)
        VMwareHypervisor.local.createSnapshot(testVm.vmxFilePath, "tools installed")
    # fix: previously returned None implicitly even though callers assign the result
    return testVm
def runTestsInTestVm(vmIdentifiers, forceThisStep=False):
    """Copy the Selenium tests into the VM and launch them.

    Reverts to the "tools installed" snapshot, copies the test invoker
    script and the tests directory into the guest, rewrites the tests for
    the browser under test, starts Selenium Server, and kicks off the
    test invoker in the background.  The VM is intentionally left
    running; the "ran tests" snapshot steps are commented out below.

    vmIdentifiers -- per-VM descriptor; this code reads .vmxFilePath and
                     .mapas (distro, browser)
    forceThisStep -- redo this step even if a "ran tests" snapshot exists

    Returns the VMwareMachine instance for the VM (added for consistency
    with makeTestVmWithGui, whose result callers use).
    """
    testVm = VMwareMachine(vmIdentifiers.vmxFilePath)
    distro = vmIdentifiers.mapas.distro
    browser = vmIdentifiers.mapas.browser
    #
    snapshots = VMwareHypervisor.local.listSnapshots(vmIdentifiers.vmxFilePath)
    snapshotExists = "ran tests" in snapshots
    if not snapshotExists or forceThisStep:
        if VMwareHypervisor.local.isRunning(testVm.vmxFilePath):
            testVm.shutdownCommand(ignoreException=True)
            VMwareHypervisor.local.sleepUntilNotRunning(testVm.vmxFilePath, ticker=True)
        VMwareHypervisor.local.revertToSnapshotAndDeleteDescendants(vmIdentifiers.vmxFilePath, "tools installed")
        #
        # start up until successful login into GUI
        VMwareHypervisor.local.start(testVm.vmxFilePath, gui=True, extraSleepSeconds=0)
        userSshParameters = testVm.sshParameters(user=testVm.regularUser)
        if distro in ["sl", "cent", "ub1204", "ub1404"]:
            LinuxSshCommand.sleepUntilIsGuiAvailable(userSshParameters, ticker=True)
        elif distro == "win":
            CygwinSshCommand.sleepUntilIsGuiAvailable(userSshParameters, alsoNeedsScreen=True, ticker=True)
        #
        # copy tests
        scriptDir = os.path.dirname(os.path.abspath(__file__))
        testsInvokerScriptPath = os.path.join(scriptDir, testsInvokerScript)
        testsDirectoryPath = os.path.join(scriptDir, testsDirectory)
        testVm.scpPutCommand(fromHostPath=[testsInvokerScriptPath, testsDirectoryPath],
                             toGuestPath="~/Downloads/",
                             guestUser=testVm.regularUser)
        # fix up tests, if necessary
        if browser == "chrome":
            # switch from webdriver.Firefox() to webdriver.Chrome()
            testVm.sshCommand(["sed -i -e 's/webdriver\.Firefox/webdriver.Chrome/'"
                               + " ~/Downloads/" + testsDirectory + "/*.py"],
                              user=testVm.regularUser)
        elif browser == "iexplorer":
            # switch from webdriver.Firefox() to webdriver.Ie()
            testVm.sshCommand(["sed -i -e 's/webdriver\.Firefox/webdriver.Ie/'"
                               + " ~/Downloads/" + testsDirectory + "/*.py"],
                              user=testVm.regularUser)
        #
        # apparently on some virtual machines the NAT interface takes some time to come up
        SshCommand(userSshParameters,
                   [LinuxUtil.commandToWaitForNetworkDevice(device="eth0", maxSeconds=100)])
        #
        # start up Selenium Server
        # default-jre installed OK until Ubuntu 12.04.4, but apparently not in Ubuntu 12.04.5
        SshCommand(userSshParameters,
                   ["nohup "
                    + "java -jar ~/Downloads/" + Download.basename(seleniumServerStandaloneJarUrl)
                    + " &> /dev/null &"])
        # allow some time to start up
        time.sleep(5)
        #
        # run tests
        if distro in ["sl", "cent", "ub1204", "ub1404"]:
            testVm.sshCommand(["export DISPLAY=:0.0 ; "
                               + "cd ~/Downloads/"
                               + " && chmod +x " + testsInvokerScript
                               + " && chmod +x " + testsDirectory + "/*.py"
                               + " && ( nohup python ./" + testsInvokerScript + " &> ./" + testsInvokerScript + ".log & )"],
                              user=testVm.regularUser)
        elif distro == "win":
            # on Windows the command is stuffed into the detached screen
            # session set up at install time, so started GUI programs work
            testVm.sshCommand(["screen -wipe ; screen -S wguifor_$USERNAME -X stuff '"
                               + "cd ~/Downloads/"
                               + " && chmod +x " + testsInvokerScript
                               + " && chmod +x " + testsDirectory + "/*.py"
                               + " && ( nohup python ./" + testsInvokerScript + " &> ./" + testsInvokerScript + ".log & )\n'"],
                              user=testVm.regularUser)
        #time.sleep(60)
        #
        # shut down for snapshot
        #testVm.shutdownCommand()
        #VMwareHypervisor.local.sleepUntilNotRunning(testVm.vmxFilePath, ticker=True)
        #VMwareHypervisor.local.createSnapshot(testVm.vmxFilePath, "ran tests")
    # fix: previously returned None implicitly even though callers assign the result
    return testVm
# Top-level driver: provision, tool up, and run tests on every VM in turn.
# NOTE: Python 2 script (print statement below); testVmsIdentifiers,
# makeTestVmWithGui etc. are defined earlier in this file.
# make sure virtual machines are no longer running from previous activities if any
for vmIdentifiers in testVmsIdentifiers:
    VMwareHypervisor.local.notRunningRequired(vmIdentifiers.vmxFilePath)
# step 1: create each VM and install its OS with GUI
testVms = []
for vmIdentifiers in testVmsIdentifiers:
    testVm = makeTestVmWithGui(vmIdentifiers)
    testVms.append(testVm)
# step 2: install the test tool chain into each VM
for vmIdentifiers in testVmsIdentifiers:
    testVm = installToolsIntoTestVm(vmIdentifiers) #, forceThisStep=True)
# step 3: run the Selenium tests in each VM
for vmIdentifiers in testVmsIdentifiers:
    testVm = runTestsInTestVm(vmIdentifiers) #, forceThisStep=True)
# alternate kind of loop we are not using right now
for testVm in testVms:
    pass
print "DONE for now, processed %s" % (", ".join(map(lambda vmdentifier: vmdentifier.name, testVmsIdentifiers)))
|
|
#coding: utf-8
from lxml import etree as ET
import re
import plumber
# NOTE(review): usage of these two patterns is not visible in this chunk;
# the names suggest they match a leading "0 " / trailing " 0" supplement
# marker -- confirm against the rest of the module.
SUPPLBEG_REGEX = re.compile(r'^0 ')
SUPPLEND_REGEX = re.compile(r' 0$')

# Map ISO 639-2/T (terminological) language codes to their ISO 639-2/B
# (bibliographic) equivalents, for the languages where the two lists
# differ; codes absent here are identical in both lists.
ISO6392T_TO_ISO6392B = {
    u'sqi': u'alb',
    u'hye': u'arm',
    u'eus': u'baq',
    u'mya': u'bur',
    u'zho': u'chi',
    u'ces': u'cze',
    u'nld': u'dut',
    u'fra': u'fre',
    u'kat': u'geo',
    u'deu': u'ger',
    u'ell': u'gre',
    u'isl': u'ice',
    u'mkd': u'mac',
    u'msa': u'may',
    u'mri': u'mao',
    u'fas': u'per',
    u'ron': u'rum',
    u'slk': u'slo',
    u'bod': u'tib',
    u'cym': u'wel'
}
class SetupArticlePipe(plumber.Pipe):
    """Seed the pipeline with an empty <records> root element.

    Passes the raw item through unchanged, paired with the new XML tree.
    """

    def transform(self, data):
        records_root = ET.Element('records')
        return data, records_root
class XMLArticlePipe(plumber.Pipe):
    """Append an empty <record> element to the <records> root."""

    def transform(self, data):
        _, tree = data
        tree.append(ET.Element('record'))
        return data
class XMLJournalMetaJournalTitlePipe(plumber.Pipe):
    """Add the journal title as a <journalTitle> child of <record>."""

    def transform(self, data):
        raw, tree = data
        title_node = ET.Element('journalTitle')
        title_node.text = raw.journal.title
        tree.find('./record').append(title_node)
        return data
class XMLJournalMetaISSNPipe(plumber.Pipe):
    """Add any available ISSN of the journal as an <issn> child of <record>."""

    def transform(self, data):
        raw, tree = data
        issn_node = ET.Element('issn')
        issn_node.text = raw.any_issn()
        tree.find('./record').append(issn_node)
        return data
class XMLJournalMetaPublisherPipe(plumber.Pipe):
    """Add one <publisher> child of <record> per journal publisher name."""

    def transform(self, data):
        raw, tree = data
        record = tree.find('./record')
        # publisher_name may be None; treat that like an empty list
        for name in (raw.journal.publisher_name or []):
            publisher_node = ET.Element('publisher')
            publisher_node.text = name
            record.append(publisher_node)
        return data
class XMLArticleMetaIdPipe(plumber.Pipe):
    """Add the article's publisher id as a <publisherRecordId> child of <record>."""

    def transform(self, data):
        raw, tree = data
        id_node = ET.Element('publisherRecordId')
        id_node.text = raw.publisher_id
        tree.find('./record').append(id_node)
        return data
class XMLArticleMetaArticleIdDOIPipe(plumber.Pipe):
    """Add the article DOI as a <doi> child of <record>; skipped without a DOI."""

    def precond(data):
        # Intentionally unbound (no ``self``): consumed as a plain function
        # by the ``plumber.precondition`` decorator below.
        raw, _ = data
        if not raw.doi:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, tree = data
        doi_node = ET.Element('doi')
        doi_node.text = raw.doi
        tree.find('./record').append(doi_node)
        return data
class XMLArticleMetaTitlePipe(plumber.Pipe):
    """Add a single <title> element (with ISO 639-2/B language code) to <record>.

    Prefers the original-language title; otherwise falls back to the first
    translated title.  Emits nothing when the article has no title at all.
    """

    def transform(self, data):
        raw, xml = data
        # fix: removed a dead no-op ``raw.original_language()`` statement and
        # the repeated accessor calls of the previous version
        original_title = raw.original_title()
        if original_title:
            text = original_title
            language = raw.original_language()
        elif raw.translated_titles():
            # first (arbitrary) translated title: language code -> title text
            language, text = next(iter(raw.translated_titles().items()))
        else:
            return data
        title = ET.Element('title')
        title.text = text
        # the target schema uses bibliographic (B) codes, e.g. 'fre' not 'fra'
        title.set('language', ISO6392T_TO_ISO6392B.get(language, language))
        xml.find('./record').append(title)
        return data
class XMLArticleMetaAuthorsPipe(plumber.Pipe):
    """Add an <authors> block with one <author> per document author."""

    def precond(data):
        raw, _ = data
        if not raw.authors:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        authors = ET.Element('authors')
        for author in raw.authors:
            entry = ET.SubElement(authors, 'author')
            full_name = ET.SubElement(entry, 'name')
            full_name.text = ' '.join(
                [author.get('given_names', ''), author.get('surname', '')])
            # Cross-references link the author to affiliation entries.
            for affiliation_id in author.get('xref', []):
                ET.SubElement(entry, 'affiliationId').text = affiliation_id
        xml.find('./record').append(authors)
        return data
class XMLArticleMetaAffiliationPipe(plumber.Pipe):
    """Add an <affiliationsList> built from the article's mixed affiliations."""

    def precond(data):
        raw, _ = data
        if not raw.mixed_affiliations:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        affiliations = ET.Element('affiliationsList')
        for affiliation in raw.mixed_affiliations:
            # Entries without an institution name are skipped.
            if 'institution' not in affiliation:
                continue
            entry = ET.SubElement(affiliations, 'affiliationName')
            entry.set('affiliationId', affiliation['index'])
            entry.text = affiliation['institution']
        xml.find('./record').append(affiliations)
        return data
class XMLArticleMetaPublicationDatePipe(plumber.Pipe):
    """Add the publication date, zero-padded to a full Y-M-D triple."""

    def transform(self, data):
        raw, xml = data
        parts = raw.publication_date.split('-')
        # A bare year or year-month is padded with '00' components; a full
        # date (or longer) is joined back unchanged.
        parts.extend(['00'] * (3 - len(parts)))
        node = ET.SubElement(xml.find('./record'), 'publicationDate')
        node.text = '-'.join(parts)
        return data
class XMLArticleMetaStartPagePipe(plumber.Pipe):
    """Add the article's start page when one is present."""

    def precond(data):
        raw, _ = data
        if not raw.start_page:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        node = ET.SubElement(xml.find('./record'), 'startPage')
        node.text = raw.start_page
        return data
class XMLArticleMetaEndPagePipe(plumber.Pipe):
    """Add the article's end page when one is present."""

    def precond(data):
        raw, _ = data
        if not raw.end_page:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        node = ET.SubElement(xml.find('./record'), 'endPage')
        node.text = raw.end_page
        return data
class XMLArticleMetaVolumePipe(plumber.Pipe):
    """Add the issue volume when one is present."""

    def precond(data):
        raw, _ = data
        if not raw.issue.volume:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        node = ET.SubElement(xml.find('./record'), 'volume')
        node.text = raw.issue.volume
        return data
class XMLArticleMetaIssuePipe(plumber.Pipe):
    """Add an <issue> element built from the issue number plus supplement labels.

    'ahead' issues are normalized to '0', and bare "suppl 0" markers are
    stripped via SUPPLBEG_REGEX / SUPPLEND_REGEX before the element is emitted.
    """

    def transform(self, data):
        raw, xml = data
        # Fix: the previous version also built a <volume> element (and its
        # 'ahead'->'0' label) here but never appended it to the tree; the
        # volume is emitted by XMLArticleMetaVolumePipe, so that dead code
        # has been removed.
        label_issue = raw.issue.number.replace('ahead', '0') if raw.issue.number else '0'
        if raw.issue.supplement_number:
            label_issue += ' suppl %s' % raw.issue.supplement_number
        if raw.issue.supplement_volume:
            label_issue += ' suppl %s' % raw.issue.supplement_volume
        label_issue = SUPPLBEG_REGEX.sub('', label_issue)
        label_issue = SUPPLEND_REGEX.sub('', label_issue)
        label_issue = label_issue.strip()
        if label_issue:
            issue = ET.Element('issue')
            issue.text = label_issue
            xml.find('./record').append(issue)
        return data
class XMLArticleMetaDocumentTypePipe(plumber.Pipe):
    """Add the document type (e.g. research-article) to the current <record>."""

    def transform(self, data):
        raw, xml = data
        node = ET.SubElement(xml.find('./record'), 'documentType')
        node.text = raw.document_type
        return data
class XMLArticleMetaFullTextUrlPipe(plumber.Pipe):
    """Add a <fullTextUrl format="html"> element for the English HTML URL."""

    def precond(data):
        raw, xml = data
        # Fix: transform calls raw.html_url(language='en'), i.e. html_url is a
        # method, so the old ``not raw.html_url`` truthiness test on the bound
        # method could never fire.  Keep the attribute check (guards a None
        # attribute) and additionally test the actual URL value.
        if not raw.html_url or not raw.html_url(language='en'):
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        url = ET.Element('fullTextUrl')
        url.set('format', 'html')
        url.text = raw.html_url(language='en')
        xml.find('./record').append(url)
        return data
class XMLArticleMetaAbstractsPipe(plumber.Pipe):
    """Add one <abstract language="..."> per available abstract."""

    def precond(data):
        raw, _ = data
        if not raw.original_abstract() and not raw.translated_abstracts():
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        record = xml.find('./record')
        if raw.original_abstract():
            lang = raw.original_language()
            node = ET.SubElement(record, 'abstract')
            node.set('language', ISO6392T_TO_ISO6392B.get(lang, lang))
            node.text = raw.original_abstract()
        if raw.translated_abstracts():
            for lang, text in raw.translated_abstracts().items():
                node = ET.SubElement(record, 'abstract')
                node.set('language', ISO6392T_TO_ISO6392B.get(lang, lang))
                node.text = text
        return data
class XMLArticleMetaKeywordsPipe(plumber.Pipe):
    """Add one <keywords language="..."> group per keyword language."""

    def precond(data):
        raw, _ = data
        if not raw.keywords():
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        record = xml.find('./record')
        keywords_by_language = raw.keywords()
        if keywords_by_language:
            for lang, keywords in keywords_by_language.items():
                group = ET.SubElement(record, 'keywords')
                group.set('language', ISO6392T_TO_ISO6392B.get(lang, lang))
                for keyword in keywords:
                    ET.SubElement(group, 'keyword').text = keyword
        return data
class XMLClosePipe(plumber.Pipe):
    """Finish the pipeline: serialize the XML tree to a UTF-8 byte string."""

    def transform(self, data):
        _, xml = data
        return ET.tostring(xml, encoding="utf-8", method="xml")
|
|
"""
Conformance tests which do not require an internet connection or HTML
parsing libraries, and so can be run as part of the normal test suite
of webcolors.
For tests which extract the relevant values, during the test run, from
the online standards documents (and so require both an internet
connection and an HTML parsing library), see the file
``definitions.py`` in this directory.
"""
import unittest
import webcolors
# The mappings of color names to values below are used for conformance
# testing; while the main webcolors module makes use of alphabetized,
# normalized mappings to hex values, the mappings below are the
# definitions in precisely the form they take in the relevant
# standards documents (they were produced via automated extraction
# from the HTML of those documents, to avoid the possibility of human
# copy/paste error).
#
# Sources are:
#
# HTML 4 colors: http://www.w3.org/TR/html401/types.html#h-6.5
#
# SVG colors (which CSS 3 adopted):
# http://www.w3.org/TR/SVG/types.html#ColorKeywords
#
# Conformance of this module with the relevant standards is proven by
# comparing its output to these mappings.
# The 16 HTML 4 color keywords, exactly as capitalized in the specification,
# mapped to their hexadecimal values.
HTML4_COLOR_DEFINITIONS = {
    "Black": "#000000",
    "Silver": "#C0C0C0",
    "Gray": "#808080",
    "White": "#FFFFFF",
    "Maroon": "#800000",
    "Red": "#FF0000",
    "Purple": "#800080",
    "Fuchsia": "#FF00FF",
    "Green": "#008000",
    "Lime": "#00FF00",
    "Olive": "#808000",
    "Yellow": "#FFFF00",
    "Navy": "#000080",
    "Blue": "#0000FF",
    "Teal": "#008080",
    "Aqua": "#00FFFF",
}

# The SVG 1.1 color keywords (adopted by CSS 3), mapped to integer
# (red, green, blue) triplets as given in the specification.
SVG_COLOR_DEFINITIONS = {
    "aliceblue": (240, 248, 255),
    "antiquewhite": (250, 235, 215),
    "aqua": (0, 255, 255),
    "aquamarine": (127, 255, 212),
    "azure": (240, 255, 255),
    "beige": (245, 245, 220),
    "bisque": (255, 228, 196),
    "black": (0, 0, 0),
    "blanchedalmond": (255, 235, 205),
    "blue": (0, 0, 255),
    "blueviolet": (138, 43, 226),
    "brown": (165, 42, 42),
    "burlywood": (222, 184, 135),
    "cadetblue": (95, 158, 160),
    "chartreuse": (127, 255, 0),
    "chocolate": (210, 105, 30),
    "coral": (255, 127, 80),
    "cornflowerblue": (100, 149, 237),
    "cornsilk": (255, 248, 220),
    "crimson": (220, 20, 60),
    "cyan": (0, 255, 255),
    "darkblue": (0, 0, 139),
    "darkcyan": (0, 139, 139),
    "darkgoldenrod": (184, 134, 11),
    "darkgray": (169, 169, 169),
    "darkgreen": (0, 100, 0),
    "darkgrey": (169, 169, 169),
    "darkkhaki": (189, 183, 107),
    "darkmagenta": (139, 0, 139),
    "darkolivegreen": (85, 107, 47),
    "darkorange": (255, 140, 0),
    "darkorchid": (153, 50, 204),
    "darkred": (139, 0, 0),
    "darksalmon": (233, 150, 122),
    "darkseagreen": (143, 188, 143),
    "darkslateblue": (72, 61, 139),
    "darkslategray": (47, 79, 79),
    "darkslategrey": (47, 79, 79),
    "darkturquoise": (0, 206, 209),
    "darkviolet": (148, 0, 211),
    "deeppink": (255, 20, 147),
    "deepskyblue": (0, 191, 255),
    "dimgray": (105, 105, 105),
    "dimgrey": (105, 105, 105),
    "dodgerblue": (30, 144, 255),
    "firebrick": (178, 34, 34),
    "floralwhite": (255, 250, 240),
    "forestgreen": (34, 139, 34),
    "fuchsia": (255, 0, 255),
    "gainsboro": (220, 220, 220),
    "ghostwhite": (248, 248, 255),
    "gold": (255, 215, 0),
    "goldenrod": (218, 165, 32),
    "gray": (128, 128, 128),
    "grey": (128, 128, 128),
    "green": (0, 128, 0),
    "greenyellow": (173, 255, 47),
    "honeydew": (240, 255, 240),
    "hotpink": (255, 105, 180),
    "indianred": (205, 92, 92),
    "indigo": (75, 0, 130),
    "ivory": (255, 255, 240),
    "khaki": (240, 230, 140),
    "lavender": (230, 230, 250),
    "lavenderblush": (255, 240, 245),
    "lawngreen": (124, 252, 0),
    "lemonchiffon": (255, 250, 205),
    "lightblue": (173, 216, 230),
    "lightcoral": (240, 128, 128),
    "lightcyan": (224, 255, 255),
    "lightgoldenrodyellow": (250, 250, 210),
    "lightgray": (211, 211, 211),
    "lightgreen": (144, 238, 144),
    "lightgrey": (211, 211, 211),
    "lightpink": (255, 182, 193),
    "lightsalmon": (255, 160, 122),
    "lightseagreen": (32, 178, 170),
    "lightskyblue": (135, 206, 250),
    "lightslategray": (119, 136, 153),
    "lightslategrey": (119, 136, 153),
    "lightsteelblue": (176, 196, 222),
    "lightyellow": (255, 255, 224),
    "lime": (0, 255, 0),
    "limegreen": (50, 205, 50),
    "linen": (250, 240, 230),
    "magenta": (255, 0, 255),
    "maroon": (128, 0, 0),
    "mediumaquamarine": (102, 205, 170),
    "mediumblue": (0, 0, 205),
    "mediumorchid": (186, 85, 211),
    "mediumpurple": (147, 112, 219),
    "mediumseagreen": (60, 179, 113),
    "mediumslateblue": (123, 104, 238),
    "mediumspringgreen": (0, 250, 154),
    "mediumturquoise": (72, 209, 204),
    "mediumvioletred": (199, 21, 133),
    "midnightblue": (25, 25, 112),
    "mintcream": (245, 255, 250),
    "mistyrose": (255, 228, 225),
    "moccasin": (255, 228, 181),
    "navajowhite": (255, 222, 173),
    "navy": (0, 0, 128),
    "oldlace": (253, 245, 230),
    "olive": (128, 128, 0),
    "olivedrab": (107, 142, 35),
    "orange": (255, 165, 0),
    "orangered": (255, 69, 0),
    "orchid": (218, 112, 214),
    "palegoldenrod": (238, 232, 170),
    "palegreen": (152, 251, 152),
    "paleturquoise": (175, 238, 238),
    "palevioletred": (219, 112, 147),
    "papayawhip": (255, 239, 213),
    "peachpuff": (255, 218, 185),
    "peru": (205, 133, 63),
    "pink": (255, 192, 203),
    "plum": (221, 160, 221),
    "powderblue": (176, 224, 230),
    "purple": (128, 0, 128),
    "red": (255, 0, 0),
    "rosybrown": (188, 143, 143),
    "royalblue": (65, 105, 225),
    "saddlebrown": (139, 69, 19),
    "salmon": (250, 128, 114),
    "sandybrown": (244, 164, 96),
    "seagreen": (46, 139, 87),
    "seashell": (255, 245, 238),
    "sienna": (160, 82, 45),
    "silver": (192, 192, 192),
    "skyblue": (135, 206, 235),
    "slateblue": (106, 90, 205),
    "slategray": (112, 128, 144),
    "slategrey": (112, 128, 144),
    "snow": (255, 250, 250),
    "springgreen": (0, 255, 127),
    "steelblue": (70, 130, 180),
    "tan": (210, 180, 140),
    "teal": (0, 128, 128),
    "thistle": (216, 191, 216),
    "tomato": (255, 99, 71),
    "turquoise": (64, 224, 208),
    "violet": (238, 130, 238),
    "wheat": (245, 222, 179),
    "white": (255, 255, 255),
    "whitesmoke": (245, 245, 245),
    "yellow": (255, 255, 0),
    "yellowgreen": (154, 205, 50),
}
class ConformanceTests(unittest.TestCase):
    """
    Demonstrate that this module conforms to the relevant standards
    documents governing colors on the Web.
    """

    def test_html_definition_conformance(self):
        """
        Compare the results of name-to-hex conversion to the canonical
        hex values provided in the HTML 4 specification.
        """
        for name in HTML4_COLOR_DEFINITIONS:
            expected = webcolors.normalize_hex(HTML4_COLOR_DEFINITIONS[name])
            assert webcolors.name_to_hex(name) == expected

    def test_svg_definition_conformance(self):
        """
        Compare the results of name-to-rgb-triplet conversion to the
        canonical triplet values provided in the SVG specification.
        """
        for name in SVG_COLOR_DEFINITIONS:
            assert webcolors.name_to_rgb(name) == SVG_COLOR_DEFINITIONS[name]
|
|
#!/usr/bin/env python
#
# This client connects to the centralized game server
# via http. After creating a new game on the game
# server, it spaws an AI subprocess called "dropblox_ai."
# For each turn, this client passes in the current game
# state to a new instance of dropblox_ai, waits ten seconds
# for a response, then kills the AI process and sends
# back the move list.
#
import contextlib
import httplib
import os
import platform
import sys
import threading
import time
import urllib2
import json
from subprocess import Popen, PIPE
from helpers import messaging
from helpers import urllib2_file
# Python 2.7.9 enabled SSL cert validation by default. Unfortunately, many
# systems don't have their root certs set up correctly, which causes all HTTPS
# connections to fail. Until we figure out how to do SSL correctly, just
# disable cert validation.
import ssl
if hasattr(ssl, '_create_unverified_context'):
    ssl._create_default_https_context = ssl._create_unverified_context

# Remote server to connect to:
PROD_HOST = 'playdropblox.com'
PROD_SSL = True  # currently server requires this to be True
if PROD_SSL:
    PROD_PORT = 443
else:
    PROD_PORT = 80

# Subprocess
# Move commands the AI may emit on stdout, one per line.
LEFT_CMD = 'left'
RIGHT_CMD = 'right'
UP_CMD = 'up'
DOWN_CMD = 'down'
ROTATE_CMD = 'rotate'
VALID_CMDS = [LEFT_CMD, RIGHT_CMD, UP_CMD, DOWN_CMD, ROTATE_CMD]

# config.txt (team name + password) lives next to this script.
MY_DIR = os.path.dirname(os.path.realpath(__file__))
CONFIG_FILE_PATH = os.path.join(MY_DIR, "config.txt")

NUM_HTTP_RETRIES = 2  # number of times to retry if http connection fails

is_windows = platform.system() == "Windows"

# Printing utilities
# TODO(astaley): Consider vetting and using colorama module for windows support
# default to no colors
colorred = '{0}'
colorgrn = colorred
try:
    import curses
    curses.setupterm()
    num_colors = curses.tigetnum('colors')
except Exception:  # no term support (windows; piping to file; etc.)
    pass
else:
    # ANSI escape sequences are only enabled on capable terminals.
    if num_colors >= 8:
        colorred = "\033[01;31m{0}\033[00m"
        colorgrn = "\033[1;36m{0}\033[00m"
class Command(object):
    """Wraps one invocation of the AI executable and collects its moves."""

    def __init__(self, cmd, *args):
        self.cmd = cmd
        self.args = list(args)

    def run(self, timeout):
        """Run the subprocess, collecting valid move commands from stdout.

        A background thread reads lines until EOF; after ``timeout`` seconds
        the process is terminated and whatever commands were collected so far
        are returned.
        """
        cmds = []
        process = Popen([self.cmd] + self.args, stdout=PIPE, universal_newlines=True,
                        shell=is_windows)

        def target():
            for line in iter(process.stdout.readline, ''):
                line = line.rstrip('\n')
                if line not in VALID_CMDS:
                    print 'INVALID COMMAND:', line  # Forward debug output to terminal
                else:
                    cmds.append(line)

        thread = threading.Thread(target=target)
        thread.start()
        # Give the AI at most `timeout` seconds of wall-clock time.
        thread.join(timeout)
        print colorred.format('Terminating process')
        try:
            process.terminate()
            # Let the reader thread drain remaining output after termination.
            thread.join(60)
        except Exception:
            pass
        print colorgrn.format('commands received: %s' % cmds)
        return cmds
class AuthException(Exception):
    """Raised when the server rejects the team credentials (HTTP 401)."""
    pass
class GameOverError(Exception):
    """Raised when the server reports that the game has ended."""

    def __init__(self, game_state_dict):
        # Final game state (including the score) as reported by the server.
        self.game_state_dict = game_state_dict
class DropbloxServer(object):
    """Thin JSON-over-HTTP client for the centralized game server."""

    def __init__(self, team_name, team_password, host, port, ssl):
        # maybe support any transport
        # but whatever
        # TODO(astaley): Consider using persistent http connections to speed up client
        # Available in httplib or picloud's urllib2file
        self.host = host
        self.port = port
        self.ssl = ssl
        self.team_name = team_name
        self.team_password = team_password

    def _request(self, path, tbd):
        """POST ``tbd`` (plus team credentials) as JSON to ``path``.

        Retries transient failures (HTTP 5xx and URL/transport errors) up to
        NUM_HTTP_RETRIES times; raises AuthException on HTTP 401; re-raises
        anything else.
        """
        schema = 'https' if self.ssl else 'http'
        url = '%s://%s:%d%s' % (schema, self.host, self.port, path)
        # Copy before mutating so the caller's dict is left untouched.
        tbd = dict(tbd)
        tbd['team_name'] = self.team_name
        tbd['password'] = self.team_password
        data = json.dumps(tbd)
        req = urllib2.Request(url, data, {
            'Content-Type': 'application/json'
        })
        for retry in range(NUM_HTTP_RETRIES+1):
            try:
                with contextlib.closing(urllib2_file.urlopen(req)) as resp:
                    return json.loads(resp.read())
            except urllib2.HTTPError, err:
                if err.code == 401:
                    raise AuthException()
                if 500 <= err.code < 600:
                    # Server-side errors are retried a few times.
                    if retry < NUM_HTTP_RETRIES:
                        print colorred.format('Received http error %s. Retrying...' % str(err))
                        time.sleep(0.5)
                        continue
                    else:
                        raise
                else:
                    raise
            except (urllib2.URLError, httplib.HTTPException), err:
                if retry < NUM_HTTP_RETRIES:
                    print colorred.format('Received %s error %s. Retrying...'
                                          % (type(err), str(err)))
                    time.sleep(0.5)
                    continue
                else:
                    raise

    def create_practice_game(self):
        """Create a new practice game; returns the server's game dict."""
        return self._request("/create_practice_game", {})

    def get_compete_game(self):
        # return None if game is not ready to go yet
        resp = self._request("/get_compete_game", {})
        return resp

    def submit_game_move(self, game_id, move_list, moves_made):
        """Submit one turn's move list; returns the updated game dict.

        Raises GameOverError when the server reports the game has ended.
        A duplicate move (possible after an http retry) is tolerated.
        """
        resp = self._request("/submit_game_move", {
            'game_id': game_id,
            'move_list': move_list,
            'moves_made': moves_made,
        })
        if resp['ret'] == 'ok':
            return resp
        elif resp['ret'] == 'fail':
            if resp['code'] == messaging.CODE_GAME_OVER:
                raise GameOverError(resp['game']['game_state'])
            elif resp['code'] == messaging.CODE_CONCURRENT_MOVE and \
                    resp['game']['number_moves_made'] == moves_made + 1:
                # duplicate move; possible http error earlier - allow
                print colorred.format('Duplicate move sent; resolving')
                return resp
            else:
                raise Exception("Bad move: %r:%r",
                                resp['code'], resp['reason'])
        raise Exception("Bad response: %r" % (resp,))
def run_ai(game_state_dict, seconds_remaining, ai_executable_absolute):
    """Launch one AI turn and return the list of move commands it produced."""
    state_arg = json.dumps(game_state_dict)
    seconds_arg = json.dumps(seconds_remaining)
    ai = Command(ai_executable_absolute, state_arg, seconds_arg)
    return ai.run(timeout=float(seconds_arg))
def run_game(server, game, ai_executable_absolute):
    """Play turns until the server raises GameOverError, then print the score."""
    game_id = game['game']['id']
    while True:
        moves_made = game['game']['number_moves_made']
        ai_cmds = run_ai(game['game']['game_state'],
                         game['competition_seconds_remaining'],
                         ai_executable_absolute)
        try:
            game = server.submit_game_move(game_id, ai_cmds, moves_made)
        except GameOverError, e:
            final_game_state_dict = e.game_state_dict
            break
    print colorgrn.format("Game over! Your score was: %s" %
                          (final_game_state_dict['score'],))
def setup_compete(server):
    """Poll the server until a competition game is ready, then return it."""
    # TODO: it might be better for this to be an actual game object
    # instead of the dictionary serialization of it
    new_game = server.get_compete_game()
    # HAX: didn't have time to clean up this abstraction
    if new_game['ret'] == 'wait':
        wait_time = float(new_game.get('wait_time', 0.5))
        print colorred.format("Waiting to compete...")
    while new_game['ret'] == 'wait':
        time.sleep(wait_time)
        new_game = server.get_compete_game()
        # HAX: didn't have time to clean up this abstraction
        if new_game['ret'] == 'wait':
            # The server may adjust the polling interval between responses.
            wait_time = float(new_game.get('wait_time', 0.5))
    print colorred.format("Fired up and ready to go!")
    return new_game
def setup_practice(server):
    """Create and return a new practice game dict."""
    # TODO: it might be better for this to be an actual game object
    # instead of the dictionary serialization of it
    return server.create_practice_game()
def main():
    """Entry point: read credentials, pick a mode, and run one game.

    Returns a process exit status (0 on success, 1 on configuration or
    authentication errors).
    """
    if not os.path.exists(CONFIG_FILE_PATH):
        print colorred.format("Couldn't find config file at \"{}\"".format(CONFIG_FILE_PATH))
        return 1
    with open(CONFIG_FILE_PATH, 'r') as f:
        team_name = f.readline().rstrip('\n')
        team_password = f.readline().rstrip('\n')
    if team_name == "TEAM_NAME_HERE" or team_password == "TEAM_PASSWORD_HERE":
        print colorred.format("Please specify a team name and password in config.txt")
        return 1
    args = sys.argv[1:]
    entry_mode = None
    # Optional second argument overrides the default AI executable name.
    if len(args) == 1:
        entry_mode, ai_executable = args[0], "dropblox_ai"
    elif len(args) == 2:
        entry_mode, ai_executable = args[0], args[1]
    if entry_mode not in ("compete", "practice"):
        print colorred.format("Usage: client.py <compete|practice> [ai_executable]")
        return 1
    ai_executable_absolute = os.path.abspath(ai_executable)
    # DROPBLOX_DEBUG points the client at a local development server.
    if os.environ.get('DROPBLOX_DEBUG'):
        connect_details = ('localhost', 8080, False)
    else:
        connect_details = (PROD_HOST, PROD_PORT, PROD_SSL)
    server = DropbloxServer(team_name, team_password, *connect_details)
    if entry_mode == "practice":
        setup_func = setup_practice
    elif entry_mode == "compete":
        setup_func = setup_compete
    else:
        assert False, 'mode = %r' % entry_mode
    try:
        new_game = setup_func(server)
    except AuthException:
        print colorred.format("Cannot authenticate, please check {}".format(CONFIG_FILE_PATH))
        return 1
    run_game(server, new_game, ai_executable_absolute)
    return 0

if __name__ == '__main__':
    sys.exit(main())
|
|
import ast, sys, json
"""
Jsvee Python transpiler
(C) Teemu Sirkia, 2016
Licensed under MIT License.
"""
class ParseResult:
    """Accumulates animation steps, labels and cursor positions while transpiling."""

    def __init__(self):
        self.steps = []             # steps emitted for executed code
        self.initSteps = []         # one-time setup steps
        self.positionStack = ['0']  # evaluation-area cursor, '/'-separated
        self.line = 0
        self.labelCounter = 0
        self.iteratorCounter = 0
        self.breakStack = []        # [continue_label, break_label] per loop
        self.functions = []
        self.classes = []
        self.classesWithInit = []
        self.firstLine = True

    def checkLine(self, line):
        """Emit a 'setLine' step whenever the current source line changes."""
        if line == self.line:
            return
        target = self.initSteps if self.firstLine else self.steps
        target.append(['setLine', line])
        self.firstLine = False
        self.line = line

    def getNextIterator(self):
        """Return a fresh iterator name ('i1', 'i2', ...)."""
        self.iteratorCounter += 1
        return 'i%d' % self.iteratorCounter

    def getNextLabel(self):
        """Return a fresh jump label ('l1', 'l2', ...)."""
        self.labelCounter += 1
        return 'l%d' % self.labelCounter

    def _shift(self, index, delta):
        # Adjust one component of the current position path by ``delta``.
        parts = self.positionStack[-1].split('/')
        parts[index] = str(int(parts[index]) + delta)
        self.positionStack[-1] = '/'.join(parts)

    def moveRight(self):
        self._shift(-1, 1)

    def moveLeft(self):
        self._shift(-1, -1)

    def moveParentRight(self):
        self._shift(-2, 1)

    def moveDown(self):
        # Descend one nesting level, starting at slot 0/0.
        self.positionStack.append('%s/0/0' % self.positionStack[-1])

    def moveUp(self):
        self.positionStack.pop()

    def getPosition(self):
        return self.positionStack[-1]

    def resetPosition(self):
        self.positionStack = ['0']

    def addInitStep(self, step):
        """Append an init step unless an identical one is already recorded."""
        key = '|'.join(str(x) for x in step)
        if any('|'.join(str(x) for x in s) == key for s in self.initSteps):
            return
        self.initSteps.append(step)
# *********************************************************************************************************************
def handleAssign(node, line, result):
    """Transpile a single-target assignment (name, subscript or attribute)."""
    result.checkLine(line)
    # TODO: multiple assignment targets
    assert len(node.targets) == 1
    name = node.targets[0].__class__.__name__
    target = node.targets[0]
    if name == 'Name':
        # plain ``x = value``
        traverseCode(node.value, line, result)
        result.steps.append(['assign', node.targets[0].id])
        result.resetPosition()
    elif name == 'Subscript':
        # ``obj[index] = value`` via the '[ ] =' pseudo-operator
        assert len(target.slice._fields) == 1 and 'value' in target.slice._fields
        pos = result.getPosition()
        traverseCode(target.value, line, result)
        result.steps.append(['addOperator', '[ ] =', result.getPosition()])
        result.addInitStep(['createOperator', '[ ] =', 'pr', '', '[ # ] = #'])
        result.moveLeft()
        result.moveDown()
        result.moveParentRight()
        traverseCode(target.slice.value, line, result)
        result.moveLeft()
        result.moveParentRight()
        traverseCode(node.value, line, result)
        result.steps.append(['setValueAtIndex', pos])
        result.resetPosition()
    elif name == 'Attribute':
        # ``obj.field = value``; only a plain Name receiver is supported
        traverseCode(node.value, line, result)
        assert node.targets[0].value.__class__.__name__ == 'Name'
        result.steps.append(['assignField', node.targets[0].attr, '@' + node.targets[0].value.id])
        result.resetPosition()
    else:
        assert False
def handleAugAssign(node, line, result):
    """Transpile ``x op= value`` into load / operator / assign steps."""
    assert node.target.__class__.__name__ == 'Name'
    result.checkLine(line)
    result.steps.append(['addValueFromVariable', node.target.id, result.getPosition()])
    result.moveRight()
    operators = {'Mult': '*', 'Add': '+', 'Sub': '-', 'Div': '/', 'Pow': '**'}
    op_name = node.op.__class__.__name__
    position = result.getPosition()
    if op_name in operators:
        symbol = operators[op_name]
        result.steps.append(['addOperator', symbol, position])
        result.addInitStep(['createOperator', symbol, 'lr'])
        result.moveRight()
    else:
        sys.stderr.write('Warning: Unknown operator {}.\n'.format(op_name))
    traverseCode(node.value, line, result)
    result.steps.append(['evaluateOperator', position])
    result.steps.append(['assign', node.target.id])
    result.resetPosition()
def handleAttribute(node, line, result):
    """Transpile an attribute read (``obj.attr``) into a field-load step.

    Only a plain ``Name.attr`` access is supported.
    """
    # TODO: attribute chains
    # Fix: the old ``type = name = ...`` double assignment shadowed the
    # ``type`` builtin and the extra alias was never used.
    value_kind = node.value.__class__.__name__
    assert value_kind == 'Name'
    result.steps.append(['addValueFromField', node.attr, '@' + node.value.id, result.getPosition()])
    result.moveRight()
def handleBinOp(node, line, result):
    """Transpile a binary arithmetic expression, left operand first."""
    operators = {'Mult': '*', 'Add': '+', 'Sub': '-', 'Div': '/', 'Pow': '**'}
    op_name = node.op.__class__.__name__
    traverseCode(node.left, line, result)
    position = result.getPosition()
    if op_name in operators:
        symbol = operators[op_name]
        result.steps.append(['addOperator', symbol, position])
        result.addInitStep(['createOperator', symbol, 'lr'])
        result.moveRight()
    else:
        sys.stderr.write('Warning: Unknown operator {}.\n'.format(op_name))
    traverseCode(node.right, line, result)
    result.steps.append(['evaluateOperator', position])
    # After evaluation the cursor backs up over both consumed operands.
    result.moveLeft()
    result.moveLeft()
def handleBoolOp(node, line, result):
    """Transpile a short-circuiting and/or via a conditional jump.

    label1 = "need the right operand" path, label2 = short-circuit exit.
    """
    # NOTE(review): only values[0] and values[1] are handled; ast folds
    # ``a and b and c`` into a single BoolOp with three values — confirm the
    # caller guarantees exactly two operands.
    ops = {'And' : 'and', 'Or' : 'or'}
    name = node.op.__class__.__name__
    traverseCode(node.values[0], line, result)
    pos = result.getPosition()
    result.steps.append(['addOperator', ops[name], pos])
    result.addInitStep(['createOperator', ops[name], 'lr'])
    label1 = result.getNextLabel()
    label2 = result.getNextLabel()
    result.steps.append(['evaluateOperator', pos])
    result.steps.append(['_conditionalJump', '@' + label1, '@' + label2])
    result.steps.append(['_label', label1])
    result.steps.append(['removeElement_', label1])
    result.moveLeft()
    traverseCode(node.values[1], line, result)
    result.steps.append(['_label', label2])
def handleBreak(node, line, result):
    """Transpile ``break``: jump to the innermost loop's break label."""
    assert result.breakStack
    result.checkLine(line)
    _, break_label = result.breakStack[-1]
    result.steps.append(['goto', '@' + break_label])
def handleCall(node, line, result):
    """Transpile a call: plain function, class instantiation, or method call."""
    result.checkLine(line)
    position = result.getPosition()
    name = node.func.__class__.__name__
    if name == 'Name':
        # Creating class instance or call function?
        if node.func.id not in result.classes:
            if node.func.id not in result.functions:
                # Unknown (builtin) function: synthesize a stub signature.
                params = 'abcdefghijklmnopq'[0:len(node.args)]
                if node.func.id != 'print':
                    result.addInitStep(['createFunction', node.func.id, node.func.id + '(' + ', '.join(params) + ')', len(node.args), '-1'])
                else:
                    # print is variadic, so its arity is recorded as -1.
                    result.addInitStep(['createFunction', node.func.id, node.func.id + '(a, ...)', -1, '-1'])
            result.steps.append(['addFunction', node.func.id, position, len(node.args)])
            result.moveDown()
            for n in node.args:
                traverseCode(n, line, result)
                result.moveLeft()
                result.moveParentRight()
            result.moveUp()
            result.moveRight()
            result.steps.append(['evaluateFunction', position])
        else:
            # Class name: create an instance, then run __init__ if defined.
            result.steps.append(['createInstance', node.func.id])
            result.steps.append(['addReference', '-1', position])
            if node.func.id in result.classesWithInit:
                result.steps.append(['addFunction', '__init__', position, len(node.args), '?'])
                result.moveDown()
                result.moveParentRight()
                for n in node.args:
                    traverseCode(n, line, result)
                    result.moveLeft()
                    result.moveParentRight()
                result.moveUp()
                result.moveRight()
                result.steps.append(['evaluateFunction', position])
    elif name == 'Attribute':
        # Method call: evaluate the receiver first.
        type = node.func.value.__class__.__name__
        traverseCode(node.func.value, line, result)
        result.steps.append(['addFunction', node.func.attr, result.getPosition(), len(node.args), '?'])
        result.moveLeft()
        # Well-known builtin methods get stub definitions on first use.
        if node.func.attr == 'append' and 'list' in result.classes:
            result.addInitStep(['createClass', 'list'])
            result.addInitStep(['createFunction', 'append', 'append' + '(item)', '1', '-1', 'list'])
        if node.func.attr == 'split':
            result.addInitStep(['createClass', 'str'])
            result.addInitStep(['createFunction', 'split', 'split(sep)', '1', '-1', 'str'])
        result.moveDown()
        result.moveParentRight()
        for n in node.args:
            traverseCode(n, line, result)
            result.moveLeft()
            result.moveParentRight()
        result.moveUp()
        result.moveRight()
        result.steps.append(['evaluateFunction', position])
    else:
        assert False
def handleClassDef(node, line, result):
    """Transpile a class definition; only methods (no base classes) are supported."""
    # TODO: base classes
    assert len(node.bases) == 0
    result.initSteps.append(['createClass', node.name])
    result.classes.append(node.name)
    for member in node.body:
        if member.__class__.__name__ != 'FunctionDef':
            continue
        is_ctor = member.name == '__init__'
        if is_ctor:
            result.classesWithInit.append(node.name)
        handleFunctionDef(member, line, result, node.name, is_ctor)
def handleCompare(node, line, result):
    """Transpile a comparison; only a single comparator is supported."""
    operators = {'Gt': '>', 'Lt': '<', 'GtE': '>=', 'LtE': '<=', 'In': 'in',
                 'NotIn': 'not in', 'Eq': '==', 'NotEq': '!='}
    traverseCode(node.left, line, result)
    # TODO: comparison chains (a < b < c)
    assert len(node.comparators) == 1
    for op, comparator in zip(node.ops, node.comparators):
        op_name = op.__class__.__name__
        position = result.getPosition()
        if op_name in operators:
            symbol = operators[op_name]
            result.steps.append(['addOperator', symbol, position])
            result.addInitStep(['createOperator', symbol, 'lr'])
            result.moveRight()
        else:
            sys.stderr.write('Warning: Unknown operator {}.\n'.format(op_name))
        traverseCode(comparator, line, result)
        result.steps.append(['evaluateOperator', position])
        result.moveLeft()
        result.moveLeft()
def handleContinue(node, line, result):
    """Transpile ``continue``: jump to the innermost loop's iteration label."""
    assert result.breakStack
    result.checkLine(line)
    continue_label = result.breakStack[-1][0]
    result.steps.append(['goto', '@' + continue_label])
def handleDict(node, line, result):
    """Transpile a dict literal; only empty dicts are supported."""
    result.steps.append(['createInstance', 'dict'])
    # TODO: dict literals with entries
    assert not node.keys and not node.values
    position = result.getPosition()
    result.steps.append(['addReference', '-1', position])
    result.moveRight()
def handleExpr(node, line, result):
    """Transpile an expression statement and reset the cursor afterwards."""
    result.checkLine(line)
    for child in ast.iter_child_nodes(node):
        traverseCode(child, line, result)
    result.resetPosition()
def handleFor(node, line, result):
    """Transpile a for loop over a name or a range(...) call.

    label1 = next iteration, label2 = loop body, label3 = break/orelse,
    label4 = end; [label1, label3] is pushed on breakStack so continue/break
    inside the body know where to jump.
    """
    # TODO: for x in range(3)
    assert node.iter.__class__.__name__ == 'Name' or (node.iter.__class__.__name__ == 'Call' and node.iter.func.id == 'range')
    assert node.target.__class__.__name__ == 'Name'
    rangeFor = False
    if node.iter.__class__.__name__ == 'Call' and node.iter.func.id == 'range':
        rangeFor = True
    result.checkLine(line)
    label1 = result.getNextLabel()
    label2 = result.getNextLabel()
    label3 = result.getNextLabel()
    label4 = result.getNextLabel()
    result.breakStack.append([label1, label3])
    iterator = result.getNextIterator()
    result.checkLine(line)
    if not rangeFor:
        # Iterate directly over the named variable.
        result.steps.append(['_createIterator', iterator, '@' + node.iter.id])
    else:
        # Evaluate range(...) first, then iterate over its result (-1).
        handleCall(node.iter, line, result)
        result.resetPosition()
        result.steps.append(['clearEvaluationArea_'])
        result.steps.append(['_createIterator', iterator, '-1'])
    result.steps.append(['_label', label1])
    result.steps.append(['_iterate', iterator, '@' + label2, '@' + label3])
    result.steps.append(['_label', label2])
    result.steps.append(['takeNext', iterator, result.getPosition()])
    result.steps.append(['assign', node.target.id])
    for n in node.body:
        traverseCode(n, line, result)
    result.steps.append(['setLine', line])
    result.steps.append(['goto', '@' + label1])
    result.steps.append(['_label', label3])
    for n in node.orelse:
        traverseCode(n, line, result)
    result.steps.append(['_label', label4])
    result.breakStack.pop()
def handleFunctionDef(node, line, result, className=None, isCtor=False):
    """Transpile a function or method definition.

    The body is emitted inline between two labels: execution jumps over it
    (label2) at definition time, and calls jump into it (label).  Methods pass
    ``className``; constructors additionally return ``self``.
    """
    args = [x.arg for x in node.args.args]
    label = result.getNextLabel()
    label2 = result.getNextLabel()
    argCount = len(args)
    # Methods do not count the implicit ``self`` parameter.
    if className != None:
        argCount -= 1
    command = ['createFunction', node.name, node.name + '(' + ', '.join(args) + ')', argCount, '@' + label]
    if className != None:
        command.append(className)
    else:
        result.functions.append(node.name)
    result.initSteps.append(command)
    result.steps.append(['goto', '@' + label2])
    result.steps.append(['_label', label])
    result.checkLine(line)
    if len(args) > 0:
        result.steps.append(['createParameterVariables', args])
        result.steps.append(['assignParameters', args])
    for n in node.body:
        traverseCode(n, line, result)
    # Functions that fall off the end implicitly return None; constructors
    # implicitly return self.
    if result.steps[-1][0] != 'returnValue' and not isCtor:
        result.steps.append(['clearEvaluationArea'])
        result.resetPosition()
        result.steps.append(['addValue', 'None', '0', 'NoneType'])
        result.steps.append(['returnValue'])
    elif isCtor:
        result.steps.append(['clearEvaluationArea_', 'self', '0'])
        result.steps.append(['addValueFromVariable', 'self', '0'])
        result.steps.append(['returnValue'])
    result.steps.append(['_label', label2])
def handleIf(node, line, result):
    """Emit steps for an if/else statement: evaluate the test, then
    conditionally jump to either branch; both branches fall through to a
    shared end label."""
    result.checkLine(line)
    then_label = result.getNextLabel()
    else_label = result.getNextLabel()
    end_label = result.getNextLabel()
    traverseCode(node.test, line, result)
    result.resetPosition()
    result.steps.append(['_conditionalJump', '@' + then_label, '@' + else_label])
    result.steps.append(['_label', then_label])
    for stmt in node.body:
        traverseCode(stmt, line, result)
    result.steps.append(['goto', '@' + end_label])
    result.steps.append(['_label', else_label])
    for stmt in node.orelse:
        traverseCode(stmt, line, result)
    result.steps.append(['goto', '@' + end_label])
    result.steps.append(['_label', end_label])
def handleList(node, line, result, type='list'):
    """Emit steps that build a list (or tuple) literal: create the
    instance, evaluate each element into a collection initializer, then
    attach the finished collection at the current position."""
    result.steps.append(['createInstance', type])
    anchor = result.getPosition()
    result.classes.append('list')
    if not node.elts:
        # Empty literal: just reference the freshly created instance.
        result.steps.append(['addReference', '-1', anchor])
        result.moveRight()
        return
    result.steps.append(['addCollectionInitializer', '-1', anchor, len(node.elts)])
    result.moveDown()
    result.moveParentRight()
    for element in node.elts:
        traverseCode(element, line, result)
        result.moveLeft()
        result.moveParentRight()
    result.moveUp()
    result.moveRight()
    result.steps.append(['initializeCollection', anchor])
def handleName(node, line, result):
    """Emit a step that pushes a name's value.  On Python < 3.5 the
    keywords True/False/None parse as Name nodes, so those ids become
    typed constants; any other id is a variable lookup."""
    keyword_types = {'True': 'bool', 'False': 'bool', 'None': 'NoneType'}
    if node.id in keyword_types:
        result.steps.append(['addValue', node.id, result.getPosition(), keyword_types[node.id]])
    else:
        result.steps.append(['addValueFromVariable', node.id, result.getPosition()])
    result.moveRight()
def handleNameConstant(node, line, result):
    """Emit a step for a NameConstant node (Python >= 3.5).

    The constants True, False and None become typed values; anything else
    is reported on stderr as unsupported.  Identity comparison (`is`) is
    used instead of `==` so that the integers 1/0 are never mistaken for
    True/False the way equality would allow.
    """
    if node.value is True:
        result.steps.append(['addValue', 'True', result.getPosition(), 'bool'])
    elif node.value is False:
        result.steps.append(['addValue', 'False', result.getPosition(), 'bool'])
    elif node.value is None:
        result.steps.append(['addValue', 'None', result.getPosition(), 'NoneType'])
    else:
        sys.stderr.write('NameConstant value ({}) not supported!\n'.format(node.value))
    result.moveRight()
def handleNum(node, line, result):
    """Push a numeric literal together with its concrete type name."""
    rendered, type_name = str(node.n), type(node.n).__name__
    result.steps.append(['addValue', rendered, result.getPosition(), type_name])
    result.moveRight()
def handlePass(node, line, result):
    """A `pass` statement contributes no visualization steps."""
    return None
def handleReturn(node, line, result):
    """Emit steps for a return statement: evaluate the returned
    expression (or push None for a bare `return`), then emit the
    return itself and reset the evaluation position."""
    result.checkLine(line)
    if node.value is None:
        result.steps.append(['addValue', 'None', result.getPosition(), 'NoneType'])
    else:
        traverseCode(node.value, line, result)
    result.steps.append(['returnValue'])
    result.resetPosition()
def handleSubscript(node, line, result):
    """Emit steps for a subscript expression.

    Distinguishes the slice node by its `_fields` length: 1 field is an
    ast.Index (plain `a[i]`), 3 fields is an ast.Slice (`a[lo:hi]`,
    lower/upper/step).  Slices with an explicit step are rejected by the
    assert below.
    """
    # TODO:
    assert len(node.slice._fields) <= 2 or node.slice.step == None
    pos = result.getPosition()
    # Evaluate the object being indexed first.
    traverseCode(node.value, line, result)
    if len(node.slice._fields) == 1:
        result.steps.append(['addOperator', '[ ]', result.getPosition()])
        result.addInitStep(['createOperator', '[ ]', 'pr', '', ' [ # ]'])
    elif len(node.slice._fields) == 3 and node.slice.step == None:
        result.steps.append(['addOperator', '[ : ]', result.getPosition()])
        result.addInitStep(['createOperator', '[ : ]', 'pr', '', ' [ # : # ]'])
    result.moveLeft()
    result.moveDown()
    result.moveParentRight()
    if len(node.slice._fields) == 1:
        # Plain index: evaluate the single index expression.
        traverseCode(node.slice.value, line, result)
    elif len(node.slice._fields) == 3 and node.slice.step == None:
        # Slice: evaluate lower bound, then upper bound if present.
        traverseCode(node.slice.lower, line, result)
        result.moveLeft()
        result.moveParentRight()
        if node.slice.upper != None:
            traverseCode(node.slice.upper, line, result)
    result.steps.append(['getValueAtIndex', pos])
    result.moveUp()
    result.moveRight()
def handleStr(node, line, result):
    """Push a string literal onto the evaluation area."""
    position = result.getPosition()
    result.steps.append(['addValue', node.s, position, 'str'])
    result.moveRight()
def handleTuple(node, line, result):
    """A tuple literal is rendered exactly like a list, tagged 'tuple'."""
    handleList(node, line, result, type='tuple')
def handleUnaryOp(node, line, result):
    """Emit steps for a unary operator.

    USub/UAdd are folded directly into a numeric literal taken from
    node.operand.n; `not` is rendered as a real operator applied to the
    evaluated operand.  Unknown operators produce a stderr warning.
    """
    ops = {'Not': 'not'}
    name = node.op.__class__.__name__
    position = result.getPosition()
    if name == 'USub':
        # Negative literal, e.g. -5: render the sign into the value.
        # NOTE(review): unlike the other branches this one emits neither
        # evaluateOperator nor a cursor move — looks asymmetric with the
        # UAdd branch below; confirm against the step interpreter.
        result.steps.append(['addValue', '-' + str(node.operand.n), result.getPosition(), node.operand.n.__class__.__name__])
    elif name == 'UAdd':
        # NOTE(review): evaluateOperator here has no matching addOperator;
        # presumably relies on an operator already on the stack — verify.
        result.steps.append(['addValue', node.operand.n, result.getPosition(), node.operand.n.__class__.__name__])
        result.steps.append(['evaluateOperator', position])
        result.moveLeft()
    elif name in ops:
        result.steps.append(['addOperator', ops[name], position])
        result.addInitStep(['createOperator', ops[name], 'r'])
        result.moveRight()
        traverseCode(node.operand, line, result)
        result.steps.append(['evaluateOperator', position])
        result.moveLeft()
    else:
        sys.stderr.write('Warning: Unknown operator {}.\n'.format(name))
def handleWhile(node, line, result):
    """Emit steps for a while loop (with optional else clause): the test
    is re-evaluated at the loop-start label; break targets the end label
    via the breakStack, and the else clause runs when the test fails."""
    loop_start = result.getNextLabel()
    result.steps.append(['_label', loop_start])
    result.checkLine(line)
    body_label = result.getNextLabel()
    else_label = result.getNextLabel()
    end_label = result.getNextLabel()
    # continue -> loop_start, break -> end_label.
    result.breakStack.append([loop_start, end_label])
    traverseCode(node.test, line, result)
    result.resetPosition()
    result.steps.append(['_conditionalJump', '@' + body_label, '@' + else_label])
    result.steps.append(['_label', body_label])
    for stmt in node.body:
        traverseCode(stmt, line, result)
    result.steps.append(['goto', '@' + loop_start])
    result.steps.append(['_label', else_label])
    for stmt in node.orelse:
        traverseCode(stmt, line, result)
    result.steps.append(['_label', end_label])
    result.breakStack.pop()
# *********************************************************************************************************************
def traverseCode(node, line, result):
    """Dispatch one AST node to its module-level handle<NodeType>
    function, warning on stderr when no handler exists.  The node's own
    line number, when present, replaces the inherited one."""
    kind = node.__class__.__name__
    line = getattr(node, 'lineno', line)
    handler = globals().get('handle' + kind)
    if handler is not None:
        handler(node, line, result)
    else:
        sys.stderr.write('Warning: No handler for {}.\n'.format(kind))
def main():
    """Compile a small demo program and print its visualization JSON."""
    code = """
a = [1, 2, 3, 4, 5]
print(a[1:-1])
""".strip()
    result = ParseResult()
    for top_node in ast.iter_child_nodes(ast.parse(code)):
        traverseCode(top_node, 0, result)
    result.initSteps.append(['createFrame'])
    output = {
        'lines': code.split('\n'),
        'settings': {'code': 'left', 'heapHeight': 0, 'stackHeight': 250, 'width': 800},
        'init': result.initSteps,
        'steps': result.steps,
    }
    print(json.dumps(output))
# *********************************************************************************************************************
if __name__ == '__main__':
main()
|
|
from django import forms
from django.apps import apps
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse, NoReverseMatch
from django.template.context_processors import csrf
from django.db.models.base import ModelBase
from django.forms.forms import DeclarativeFieldsMetaclass
from django.forms.utils import flatatt
from django.template import loader
from django.http import Http404
from django.test.client import RequestFactory
from django.utils.encoding import force_unicode, smart_unicode
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.http import urlencode, urlquote
from django.views.decorators.cache import never_cache
from xadmin import widgets as exwidgets
from xadmin.layout import FormHelper
from xadmin.models import UserSettings, UserWidget
from xadmin.plugins.utils import get_context_dict
from xadmin.sites import site
from xadmin.views.base import CommAdminView, ModelAdminView, filter_hook, csrf_protect_m
from xadmin.views.edit import CreateAdminView
from xadmin.views.list import ListAdminView
from xadmin.util import unquote
import copy
class WidgetTypeSelect(forms.Widget):
    """Form widget that renders the available dashboard widget types as a
    Bootstrap pill list; the chosen type is mirrored into a hidden input
    that actually submits the value."""
    def __init__(self, widgets, attrs=None):
        super(WidgetTypeSelect, self).__init__(attrs)
        # Widget classes offered as choices.
        self._widgets = widgets
    def render(self, name, value, attrs=None):
        # Render the <ul> of selectable types plus the hidden <input>
        # carrying the current selection.
        if value is None:
            value = ''
        final_attrs = self.build_attrs(attrs, name=name)
        final_attrs['class'] = 'nav nav-pills nav-stacked'
        output = [u'<ul%s>' % flatatt(final_attrs)]
        options = self.render_options(force_unicode(value), final_attrs['id'])
        if options:
            output.append(options)
        output.append(u'</ul>')
        output.append('<input type="hidden" id="%s_input" name="%s" value="%s"/>' %
                      (final_attrs['id'], name, force_unicode(value)))
        return mark_safe(u'\n'.join(output))
    def render_option(self, selected_choice, widget, id):
        # One <li> per widget type; the inline onclick moves the "active"
        # class and copies widget_type into the hidden input.
        if widget.widget_type == selected_choice:
            selected_html = u' class="active"'
        else:
            selected_html = ''
        return (u'<li%s><a onclick="' +
                'javascript:$(this).parent().parent().find(\'>li\').removeClass(\'active\');$(this).parent().addClass(\'active\');' +
                '$(\'#%s_input\').attr(\'value\', \'%s\')' % (id, widget.widget_type) +
                '"><h4><i class="%s"></i> %s</h4><p>%s</p></a></li>') % (
                selected_html,
                widget.widget_icon,
                widget.widget_title or widget.widget_type,
                widget.description)
    def render_options(self, selected_choice, id):
        # Normalize to strings.
        output = []
        for widget in self._widgets:
            output.append(self.render_option(selected_choice, widget, id))
        return u'\n'.join(output)
class UserWidgetAdmin(object):
    """xadmin options for UserWidget: per-user/per-page widget listing plus
    the two-step "add widget" wizard (pick a type, then fill its params)."""
    model_icon = 'fa fa-dashboard'
    list_display = ('widget_type', 'page_id', 'user')
    list_filter = ['user', 'widget_type', 'page_id']
    list_display_links = ('widget_type',)
    user_fields = ['user']
    hidden_menu = True
    wizard_form_list = (
        (_(u"Widget Type"), ('page_id', 'widget_type')),
        (_(u"Widget Params"), {'callback':
            "get_widget_params_form", 'convert': "convert_widget_params"})
    )
    def formfield_for_dbfield(self, db_field, **kwargs):
        # widget_type gets the pill selector, limited to widgets available
        # for the requested page; page_id is hidden when pre-supplied.
        if db_field.name == 'widget_type':
            widgets = widget_manager.get_widgets(self.request.GET.get('page_id', ''))
            form_widget = WidgetTypeSelect(widgets)
            return forms.ChoiceField(choices=[(w.widget_type, w.description) for w in widgets],
                                     widget=form_widget, label=_('Widget Type'))
        if 'page_id' in self.request.GET and db_field.name == 'page_id':
            kwargs['widget'] = forms.HiddenInput
        field = super(
            UserWidgetAdmin, self).formfield_for_dbfield(db_field, **kwargs)
        return field
    def get_widget_params_form(self, wizard):
        # Build the second wizard step dynamically from the chosen widget
        # class's declared form fields (minus the internal id field).
        data = wizard.get_cleaned_data_for_step(wizard.steps.first)
        widget_type = data['widget_type']
        widget = widget_manager.get(widget_type)
        fields = copy.deepcopy(widget.base_fields)
        if 'id' in fields:
            del fields['id']
        return DeclarativeFieldsMetaclass("WidgetParamsForm", (forms.Form,), fields)
    def convert_widget_params(self, wizard, cleaned_data, form):
        # Serialize the params form into the UserWidget value blob.
        widget = UserWidget()
        value = dict([(f.name, f.value()) for f in form])
        widget.set_value(value)
        cleaned_data['value'] = widget.value
        cleaned_data['user'] = self.user
    def get_list_display(self):
        # Non-superusers only see their own widgets, so hide the column.
        list_display = super(UserWidgetAdmin, self).get_list_display()
        if not self.user.is_superuser:
            list_display.remove('user')
        return list_display
    def queryset(self):
        if self.user.is_superuser:
            return super(UserWidgetAdmin, self).queryset()
        return UserWidget.objects.filter(user=self.user)
    def update_dashboard(self, obj):
        # Drop the widget's id from the owning dashboard's saved layout.
        try:
            portal_pos = UserSettings.objects.get(
                user=obj.user, key="dashboard:%s:pos" % obj.page_id)
        except UserSettings.DoesNotExist:
            return
        pos = [[w for w in col.split(',') if w != str(
            obj.id)] for col in portal_pos.value.split('|')]
        portal_pos.value = '|'.join([','.join(col) for col in pos])
        portal_pos.save()
    def delete_model(self):
        self.update_dashboard(self.obj)
        super(UserWidgetAdmin, self).delete_model()
    def delete_models(self, queryset):
        for obj in queryset:
            self.update_dashboard(obj)
        super(UserWidgetAdmin, self).delete_models(queryset)
site.register(UserWidget, UserWidgetAdmin)
class WidgetManager(object):
    """Registry mapping widget_type strings to widget classes.

    `register` doubles as a class decorator; lookups go through `get`,
    and `get_widgets` lists everything registered (page_id is currently
    ignored)."""
    _widgets = None
    def __init__(self):
        self._widgets = {}
    def register(self, widget_class):
        """Register `widget_class` under its widget_type and return it."""
        self._widgets[widget_class.widget_type] = widget_class
        return widget_class
    def get(self, name):
        """Look up a widget class by its widget_type name."""
        return self._widgets[name]
    def get_widgets(self, page_id):
        """Return all registered widget classes (page_id unused)."""
        return self._widgets.values()
widget_manager = WidgetManager()
class WidgetDataError(Exception):
    """Raised when a widget's form data fails validation.

    Carries the offending widget instance and its error collection; the
    exception message is the stringified errors."""
    def __init__(self, widget, errors):
        super(WidgetDataError, self).__init__(str(errors))
        self.widget = widget
        self.errors = errors
class BaseWidget(forms.Form):
    """Abstract dashboard widget.

    A widget is a Django form whose cleaned data configures it; `widget`
    renders it through `template`.  Subclasses override widget_type,
    description, template, context() and has_perm()."""
    template = 'xadmin/widgets/base.html'
    description = 'Base Widget, don\'t use it.'
    widget_title = None
    widget_icon = 'fa fa-plus-square'
    widget_type = 'base'
    base_title = None
    # Every widget stores its UserWidget row id and an optional title.
    id = forms.IntegerField(label=_('Widget ID'), widget=forms.HiddenInput)
    title = forms.CharField(label=_('Widget Title'), required=False, widget=exwidgets.AdminTextInputWidget)
    def __init__(self, dashboard, data):
        # Bind to the owning dashboard and validate the stored config.
        self.dashboard = dashboard
        self.admin_site = dashboard.admin_site
        self.request = dashboard.request
        self.user = dashboard.request.user
        self.convert(data)
        super(BaseWidget, self).__init__(data)
        if not self.is_valid():
            raise WidgetDataError(self, self.errors.as_text())
        self.setup()
    def setup(self):
        # Crispy-forms helper used when the widget renders its own form.
        helper = FormHelper()
        helper.form_tag = False
        helper.include_media = False
        self.helper = helper
        self.id = self.cleaned_data['id']
        self.title = self.cleaned_data['title'] or self.base_title
        if not (self.user.is_superuser or self.has_perm()):
            raise PermissionDenied
    @property
    def widget(self):
        """Render this widget to an HTML string via its template."""
        context = {'widget_id': self.id, 'widget_title': self.title, 'widget_icon': self.widget_icon,
                   'widget_type': self.widget_type, 'form': self, 'widget': self}
        context.update(csrf(self.request))
        self.context(context)
        return loader.render_to_string(self.template, context)
    def context(self, context):
        # Hook: subclasses add template context here.
        pass
    def convert(self, data):
        # Hook: subclasses pull non-form values out of the raw data dict
        # before form validation runs.
        pass
    def has_perm(self):
        # Hook: subclasses grant access; base widget is never allowed.
        return False
    def save(self):
        # Persist current form values back onto the UserWidget row.
        value = dict([(f.name, f.value()) for f in self])
        user_widget = UserWidget.objects.get(id=self.id)
        user_widget.set_value(value)
        user_widget.save()
    def static(self, path):
        return self.dashboard.static(path)
    def vendor(self, *tags):
        return self.dashboard.vendor(*tags)
    def media(self):
        return forms.Media()
@widget_manager.register
class HtmlWidget(BaseWidget):
    """Widget that renders a user-supplied block of raw HTML."""
    widget_type = 'html'
    widget_icon = 'fa fa-file-o'
    description = _(
        u'Html Content Widget, can write any html content in widget.')
    content = forms.CharField(label=_(
        'Html Content'), widget=exwidgets.AdminTextareaWidget, required=False)
    def has_perm(self):
        # Any dashboard user may add plain HTML content.
        return True
    def context(self, context):
        context['content'] = self.cleaned_data['content']
class ModelChoiceIterator(object):
    """Lazily yields ('app_label.model_name', verbose_name) choice pairs
    for every model registered with the global xadmin site."""
    def __init__(self, field):
        self.field = field
    def __iter__(self):
        # Imported here to avoid a circular import at module load time.
        from xadmin import site as g_admin_site
        for model, model_admin in g_admin_site._registry.items():
            opts = model._meta
            yield ('%s.%s' % (opts.app_label, opts.model_name),
                   opts.verbose_name)
class ModelChoiceField(forms.ChoiceField):
    """ChoiceField whose choices are the models registered with xadmin;
    values convert between 'app_label.model_name' strings and model
    classes."""
    def __init__(self, required=True, widget=None, label=None, initial=None,
                 help_text=None, *args, **kwargs):
        # Call Field instead of ChoiceField __init__() because we don't need
        # ChoiceField.__init__().
        forms.Field.__init__(self, required, widget, label, initial, help_text,
                             *args, **kwargs)
        self.widget.choices = self.choices
    def __deepcopy__(self, memo):
        result = forms.Field.__deepcopy__(self, memo)
        return result
    def _get_choices(self):
        # Fresh iterator each access, so newly registered models appear.
        return ModelChoiceIterator(self)
    choices = property(_get_choices, forms.ChoiceField._set_choices)
    def to_python(self, value):
        """Convert an 'app.model' string into the model class."""
        if isinstance(value, ModelBase):
            return value
        app_label, model_name = value.lower().split('.')
        return apps.get_model(app_label, model_name)
    def prepare_value(self, value):
        """Convert a model class back into its 'app.model' string."""
        if isinstance(value, ModelBase):
            value = '%s.%s' % (value._meta.app_label, value._meta.model_name)
        return value
    def valid_value(self, value):
        value = self.prepare_value(value)
        for k, v in self.choices:
            if value == smart_unicode(k):
                return True
        return False
class ModelBaseWidget(BaseWidget):
    """Base for widgets bound to a single model; permission is checked
    against `model_perm` for that model."""
    app_label = None
    model_name = None
    model_perm = 'change'
    model = ModelChoiceField(label=_(u'Target Model'), widget=exwidgets.AdminSelectWidget)
    def __init__(self, dashboard, data):
        self.dashboard = dashboard
        super(ModelBaseWidget, self).__init__(dashboard, data)
    def setup(self):
        # Resolve the chosen model before the base setup runs its
        # permission check (has_perm below needs self.model).
        self.model = self.cleaned_data['model']
        self.app_label = self.model._meta.app_label
        self.model_name = self.model._meta.model_name
        super(ModelBaseWidget, self).setup()
    def has_perm(self):
        return self.dashboard.has_model_perm(self.model, self.model_perm)
    def filte_choices_model(self, model, modeladmin):
        return self.dashboard.has_model_perm(model, self.model_perm)
    def model_admin_url(self, name, *args, **kwargs):
        """Reverse an admin URL (e.g. 'changelist', 'add') for the bound model."""
        return reverse(
            "%s:%s_%s_%s" % (self.admin_site.app_name, self.app_label,
                             self.model_name, name), args=args, kwargs=kwargs)
class PartialBaseWidget(BaseWidget):
    """Base for widgets that render by replaying another admin view
    against a synthetic request built with Django's RequestFactory."""
    def get_view_class(self, view_class, model=None, **opts):
        """Resolve the concrete admin view class, merging in the model's
        registered admin options when a model is given."""
        admin_class = self.admin_site._registry.get(model) if model else None
        return self.admin_site.get_view_class(view_class, admin_class, **opts)
    def get_factory(self):
        return RequestFactory()
    def setup_request(self, request):
        """Attach the dashboard user's identity and session to a
        synthetic request so the replayed view sees the real user."""
        request.user = self.user
        request.session = self.request.session
        return request
    def make_get_request(self, path, data=None, **extra):
        # data=None instead of a mutable {} default: a shared dict default
        # would be visible to, and mutable by, every call site.
        req = self.get_factory().get(path, data if data is not None else {}, **extra)
        return self.setup_request(req)
    def make_post_request(self, path, data=None, **extra):
        req = self.get_factory().post(path, data if data is not None else {}, **extra)
        return self.setup_request(req)
@widget_manager.register
class QuickBtnWidget(BaseWidget):
    """Widget showing a list of quick-link buttons, each pointing at a
    model admin view or an arbitrary named/absolute URL."""
    widget_type = 'qbutton'
    description = _(u'Quick button Widget, quickly open any page.')
    template = "xadmin/widgets/qbutton.html"
    base_title = _(u"Quick Buttons")
    widget_icon = 'fa fa-caret-square-o-right'
    def convert(self, data):
        # Button definitions live outside the form fields.
        self.q_btns = data.pop('btns', [])
    def get_model(self, model_or_label):
        """Accept either a model class or an 'app.model' label string."""
        if isinstance(model_or_label, ModelBase):
            return model_or_label
        else:
            return apps.get_model(*model_or_label.lower().split('.'))
    def context(self, context):
        btns = []
        for b in self.q_btns:
            btn = {}
            if 'model' in b:
                model = self.get_model(b['model'])
                # Skip buttons the current user may not view.
                if not self.user.has_perm("%s.view_%s" % (model._meta.app_label, model._meta.model_name)):
                    continue
                btn['url'] = reverse("%s:%s_%s_%s" % (self.admin_site.app_name, model._meta.app_label,
                                                      model._meta.model_name, b.get('view', 'changelist')))
                btn['title'] = model._meta.verbose_name
                btn['icon'] = self.dashboard.get_model_icon(model)
            else:
                # Named URL first; fall back to treating it as a literal path.
                try:
                    btn['url'] = reverse(b['url'])
                except NoReverseMatch:
                    btn['url'] = b['url']
                if 'title' in b:
                    btn['title'] = b['title']
                if 'icon' in b:
                    btn['icon'] = b['icon']
            btns.append(btn)
        context.update({'btns': btns})
    def has_perm(self):
        return True
@widget_manager.register
class ListWidget(ModelBaseWidget, PartialBaseWidget):
    """Widget embedding a model changelist (first page, first 5 columns)
    by replaying the ListAdminView against a synthetic request."""
    widget_type = 'list'
    description = _(u'Any Objects list Widget.')
    template = "xadmin/widgets/list.html"
    model_perm = 'view'
    widget_icon = 'fa fa-align-justify'
    def convert(self, data):
        # Extra filter params and row count live outside the form fields.
        self.list_params = data.pop('params', {})
        self.list_count = data.pop('count', 10)
    def setup(self):
        super(ListWidget, self).setup()
        if not self.title:
            self.title = self.model._meta.verbose_name_plural
        req = self.make_get_request("", self.list_params)
        self.list_view = self.get_view_class(ListAdminView, self.model)(req)
        if self.list_count:
            self.list_view.list_per_page = self.list_count
    def context(self, context):
        list_view = self.list_view
        list_view.make_result_list()
        # Cap the embedded table at the first five display columns.
        base_fields = list_view.base_list_display
        if len(base_fields) > 5:
            base_fields = base_fields[0:5]
        context['result_headers'] = [c for c in list_view.result_headers(
        ).cells if c.field_name in base_fields]
        context['results'] = [[o for i, o in
                               enumerate(filter(lambda c:c.field_name in base_fields, r.cells))]
                              for r in list_view.results()]
        context['result_count'] = list_view.result_count
        context['page_url'] = self.model_admin_url('changelist') + "?" + urlencode(self.list_params)
@widget_manager.register
class AddFormWidget(ModelBaseWidget, PartialBaseWidget):
    """Widget embedding the "add object" form of a model by replaying the
    CreateAdminView against a synthetic request."""
    widget_type = 'addform'
    description = _(u'Add any model object Widget.')
    template = "xadmin/widgets/addform.html"
    model_perm = 'add'
    widget_icon = 'fa fa-plus'
    def setup(self):
        super(AddFormWidget, self).setup()
        if self.title is None:
            self.title = _('Add %s') % self.model._meta.verbose_name
        req = self.make_get_request("")
        self.add_view = self.get_view_class(
            CreateAdminView, self.model, list_per_page=10)(req)
        self.add_view.instance_forms()
    def context(self, context):
        helper = FormHelper()
        helper.form_tag = False
        helper.include_media = False
        context.update({
            'addform': self.add_view.form_obj,
            'addhelper': helper,
            'addurl': self.add_view.model_admin_url('add'),
            'model': self.model
        })
    def media(self):
        return self.add_view.media + self.add_view.form_obj.media + self.vendor('xadmin.plugin.quick-form.js')
class Dashboard(CommAdminView):
    """Admin view rendering a page of user-configurable widgets.

    Widget layout is persisted per user/page as a UserSettings row whose
    value is columns separated by '|' and widget ids separated by ','."""
    widget_customiz = True
    widgets = []
    title = _(u"Dashboard")
    icon = None
    def get_page_id(self):
        # The request path identifies the dashboard page.
        return self.request.path
    def get_portal_key(self):
        return "dashboard:%s:pos" % self.get_page_id()
    @filter_hook
    def get_widget(self, widget_or_id, data=None):
        """Instantiate the widget class for a UserWidget row (or its id),
        returning None when no such row exists for this user/page."""
        try:
            if isinstance(widget_or_id, UserWidget):
                widget = widget_or_id
            else:
                widget = UserWidget.objects.get(user=self.user, page_id=self.get_page_id(), id=widget_or_id)
            wid = widget_manager.get(widget.widget_type)
            # Subclass on the fly so the rendered widget knows whether the
            # current user may edit it.
            class widget_with_perm(wid):
                def context(self, context):
                    super(widget_with_perm, self).context(context)
                    context.update({'has_change_permission': self.request.user.has_perm('xadmin.change_userwidget')})
            wid_instance = widget_with_perm(self, data or widget.get_value())
            return wid_instance
        except UserWidget.DoesNotExist:
            return None
    @filter_hook
    def get_init_widget(self):
        """Create UserWidget rows from the class-level default `widgets`
        layout, persist the initial column layout, and return it."""
        portal = []
        widgets = self.widgets
        for col in widgets:
            portal_col = []
            for opts in col:
                try:
                    widget = UserWidget(user=self.user, page_id=self.get_page_id(), widget_type=opts['type'])
                    widget.set_value(opts)
                    widget.save()
                    portal_col.append(self.get_widget(widget))
                except (PermissionDenied, WidgetDataError):
                    # Roll back the row we just created for a widget the
                    # user may not have (or whose data is invalid).
                    widget.delete()
                    continue
            portal.append(portal_col)
        UserSettings(
            user=self.user, key="dashboard:%s:pos" % self.get_page_id(),
            value='|'.join([','.join([str(w.id) for w in col]) for col in portal])).save()
        return portal
    @filter_hook
    def get_widgets(self):
        """Load this user's saved layout, falling back to the defaults."""
        if self.widget_customiz:
            portal_pos = UserSettings.objects.filter(
                user=self.user, key=self.get_portal_key())
            if len(portal_pos):
                portal_pos = portal_pos[0].value
                widgets = []
                if portal_pos:
                    user_widgets = dict([(uw.id, uw) for uw in UserWidget.objects.filter(user=self.user, page_id=self.get_page_id())])
                    for col in portal_pos.split('|'):
                        ws = []
                        for wid in col.split(','):
                            try:
                                widget = user_widgets.get(int(wid))
                                if widget:
                                    ws.append(self.get_widget(widget))
                            # Python 2 except syntax; broad catch keeps one
                            # broken widget from taking down the page.
                            except Exception, e:
                                import logging
                                logging.error(e, exc_info=True)
                        widgets.append(ws)
                return widgets
        return self.get_init_widget()
    @filter_hook
    def get_title(self):
        return self.title
    @filter_hook
    def get_context(self):
        new_context = {
            'title': self.get_title(),
            'icon': self.icon,
            'portal_key': self.get_portal_key(),
            'columns': [('col-sm-%d' % int(12 / len(self.widgets)), ws) for ws in self.widgets],
            'has_add_widget_permission': self.has_model_perm(UserWidget, 'add') and self.widget_customiz,
            'add_widget_url': self.get_admin_url('%s_%s_add' % (UserWidget._meta.app_label, UserWidget._meta.model_name)) +
            "?user=%s&page_id=%s&_redirect=%s" % (self.user.id, self.get_page_id(), urlquote(self.request.get_full_path()))
        }
        context = super(Dashboard, self).get_context()
        context.update(new_context)
        return context
    @never_cache
    def get(self, request, *args, **kwargs):
        self.widgets = self.get_widgets()
        return self.template_response('xadmin/views/dashboard.html', self.get_context())
    @csrf_protect_m
    def post(self, request, *args, **kwargs):
        # Handles both "save widget config" and "delete widget" posts,
        # then re-renders the dashboard.
        if 'id' in request.POST:
            widget_id = request.POST['id']
            if request.POST.get('_delete', None) != 'on':
                widget = self.get_widget(widget_id, request.POST.copy())
                widget.save()
            else:
                try:
                    widget = UserWidget.objects.get(
                        user=self.user, page_id=self.get_page_id(), id=widget_id)
                    widget.delete()
                    try:
                        # Also scrub the deleted id from the saved layout.
                        portal_pos = UserSettings.objects.get(user=self.user, key="dashboard:%s:pos" % self.get_page_id())
                        pos = [[w for w in col.split(',') if w != str(
                            widget_id)] for col in portal_pos.value.split('|')]
                        portal_pos.value = '|'.join([','.join(col) for col in pos])
                        portal_pos.save()
                    except Exception:
                        pass
                except UserWidget.DoesNotExist:
                    pass
        return self.get(request)
    @filter_hook
    def get_media(self):
        media = super(Dashboard, self).get_media() + \
            self.vendor('xadmin.page.dashboard.js', 'xadmin.page.dashboard.css')
        if self.widget_customiz:
            media = media + self.vendor('xadmin.plugin.portal.js')
        for ws in self.widgets:
            for widget in ws:
                media = media + widget.media()
        return media
class ModelDashboard(Dashboard, ModelAdminView):
    """Dashboard scoped to one model instance ('model:app/model' page id)."""
    title = _(u"%s Dashboard")
    def get_page_id(self):
        return 'model:%s/%s' % self.model_info
    @filter_hook
    def get_title(self):
        return self.title % force_unicode(self.obj)
    def init_request(self, object_id, *args, **kwargs):
        # NOTE(review): the permission check runs before the None check,
        # so a missing object is filtered by has_view_permission first —
        # confirm that handles obj=None gracefully.
        self.obj = self.get_object(unquote(object_id))
        if not self.has_view_permission(self.obj):
            raise PermissionDenied
        if self.obj is None:
            raise Http404(_('%(name)s object with primary key %(key)r does not exist.') %
                          {'name': force_unicode(self.opts.verbose_name), 'key': escape(object_id)})
    @filter_hook
    def get_context(self):
        new_context = {
            'has_change_permission': self.has_change_permission(self.obj),
            'object': self.obj,
        }
        context = Dashboard.get_context(self)
        context.update(ModelAdminView.get_context(self))
        context.update(new_context)
        return context
    @never_cache
    def get(self, request, *args, **kwargs):
        self.widgets = self.get_widgets()
        return self.template_response(self.get_template_list('views/model_dashboard.html'), self.get_context())
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import multiprocessing as mp
import platform
import sys
import textwrap
from . import console, util
from .console import log, color_print
def iter_machine_files(results_dir):
    """
    Iterate over all of the machine.json files in the results_dir
    """
    for root, dirs, files in os.walk(results_dir):
        if 'machine.json' in files:
            yield os.path.join(root, 'machine.json')
def _get_unique_machine_name():
(system, node, release, version, machine, processor) = platform.uname()
return node
class MachineCollection:
    """
    Stores information about 1 or more machines in the
    ~/.asv-machine.json file.
    """
    # Schema version written to / expected from the JSON file.
    api_version = 1
    @staticmethod
    def get_machine_file_path():
        """Return the path of the per-user machine registry file."""
        return os.path.expanduser('~/.asv-machine.json')
    @classmethod
    def load(cls, machine_name, _path=None):
        """Return the stored info dict for `machine_name`.

        Falls back to the single stored entry when the file holds exactly
        one machine and `machine_name` matches this host; raises
        util.UserError when nothing matches.
        """
        if _path is None:
            path = cls.get_machine_file_path()
        else:
            path = _path
        d = {}
        if os.path.isfile(path):
            d = util.load_json(path, cls.api_version)
            if machine_name in d:
                return d[machine_name]
            elif len(d) == 1 and machine_name == _get_unique_machine_name():
                return d[list(d.keys())[0]]
        raise util.UserError(
            "No information stored about machine '{0}'. I know about {1}.".format(
                machine_name, util.human_list(d.keys())))
    @classmethod
    def save(cls, machine_name, machine_info, _path=None):
        """Insert or replace `machine_name`'s entry and rewrite the file."""
        if _path is None:
            path = cls.get_machine_file_path()
        else:
            path = _path
        if os.path.isfile(path):
            # NOTE(review): loaded without api_version here, unlike load()
            # — presumably to tolerate files pending a schema upgrade;
            # confirm against util.load_json semantics.
            d = util.load_json(path)
        else:
            d = {}
        d[machine_name] = machine_info
        util.write_json(path, d, cls.api_version)
    @classmethod
    def update(cls, _path=None):
        """Run the schema-upgrade hook over the machine file, if present."""
        if _path is None:
            path = cls.get_machine_file_path()
        else:
            path = _path
        if os.path.isfile(path):
            util.update_json(cls, path, cls.api_version)
class Machine:
    """
    Stores information about a particular machine.
    """
    api_version = 1
    # (field name, human-readable prompt) pairs; the prompts are shown
    # verbatim by generate_machine_file() below.
    fields = [
        ("machine",
         """
         A unique name to identify this machine in the results. May
         be anything, as long as it is unique across all the machines used
         to benchmark this project.
         NOTE: If changed from the default, it will no longer match
         the hostname of this machine, and you may need to explicitly
         use the --machine argument to asv.
         """),
        ("os",
         """
         The OS type and version of this machine. For example,
         'Macintosh OS-X 10.8'."""),
        ("arch",
         """
         The generic CPU architecture of this machine. For
         example, 'i386' or 'x86_64'."""),
        ("cpu",
         """
         A specific description of the CPU of this machine,
         including its speed and class. For example, 'Intel(R)
         Core(TM) i5-2520M CPU @ 2.50GHz (4 cores)'."""),
        ("num_cpu",
         """
         The number of CPUs in the system. For example,
         '4'."""),
        ("ram",
         """
         The amount of physical RAM on this machine. For example,
         '4GB'.""")
    ]
    # Set (e.g. by the command line) to override the hostname-based name.
    hardcoded_machine_name = None
    @classmethod
    def get_unique_machine_name(cls):
        """Return the override name if set, else the hostname."""
        if cls.hardcoded_machine_name:
            return cls.hardcoded_machine_name
        return _get_unique_machine_name()
    @staticmethod
    def get_defaults():
        """Probe the current host for default values of every field."""
        (system, node, release, version, machine, processor) = platform.uname()
        cpu = util.get_cpu_info()
        ram = str(util.get_memsize())
        num_cpu = str(mp.cpu_count())
        return {
            'machine': node,
            'os': "{0} {1}".format(system, release),
            'num_cpu': num_cpu,
            'arch': platform.machine(),
            'cpu': cpu,
            'ram': ram}
    @staticmethod
    def generate_machine_file(use_defaults=False):
        """Interactively (or from defaults) collect a value per field.

        Raises util.UserError when not attached to a terminal and
        defaults were not requested.
        """
        if not sys.stdout.isatty() and not use_defaults:
            raise util.UserError(
                "Run asv at the console the first time to generate "
                "one, or run `asv machine --yes`.")
        log.flush()
        color_print(
            "I will now ask you some questions about this machine to "
            "identify it in the benchmarks.")
        color_print("")
        defaults = Machine.get_defaults()
        values = {}
        for i, (name, description) in enumerate(Machine.fields):
            print(
                textwrap.fill(
                    '{0}. {1}: {2}'.format(
                        i + 1, name, textwrap.dedent(description)),
                    subsequent_indent='   '))
            values[name] = console.get_answer_default(name, defaults[name],
                                                      use_defaults=use_defaults)
        return values
    @classmethod
    def load(cls, interactive=False, force_interactive=False, _path=None,
             machine_name=None, use_defaults=False, **kwargs):
        """Load (or create) the info for `machine_name` and persist it.

        kwargs override stored values.  When nothing is stored and
        `interactive` is set (or `force_interactive` regardless), the
        questionnaire runs and may rename the machine.
        """
        self = Machine()
        if machine_name is None:
            machine_name = cls.get_unique_machine_name()
        try:
            d = MachineCollection.load(machine_name, _path=_path)
        except util.UserError as e:
            console.log.error(str(e) + '\n')
            d = {}
        # NOTE(review): kwargs are merged before the emptiness test, so
        # passing kwargs suppresses the interactive path — confirm intended.
        d.update(kwargs)
        if (not len(d) and interactive) or force_interactive:
            d.update(self.generate_machine_file(use_defaults=use_defaults))
        machine_name = d['machine']
        self.__dict__.update(d)
        MachineCollection.save(machine_name, self.__dict__, _path=_path)
        return self
    def save(self, results_dir):
        """Write this machine's info into its results subdirectory."""
        path = os.path.join(results_dir, self.machine, 'machine.json')
        util.write_json(path, self.__dict__, self.api_version)
    @classmethod
    def update(cls, path):
        """Run the schema-upgrade hook over a machine.json file."""
        util.update_json(cls, path, cls.api_version)
|
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes used to enumerate surface sites
and to find adsorption sites on slabs
"""
import itertools
import os
import numpy as np
from matplotlib import patches
from matplotlib.path import Path
from monty.serialization import loadfn
from scipy.spatial import Delaunay
from pymatgen import vis
from pymatgen.analysis.local_env import VoronoiNN
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.core.operations import SymmOp
from pymatgen.core.structure import Structure
from pymatgen.core.surface import generate_all_slabs
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.coord import in_coord_list_pbc
__author__ = "Joseph Montoya"
__copyright__ = "Copyright 2016, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Joseph Montoya"
__credits__ = "Richard Tran"
__email__ = "montoyjh@lbl.gov"
__status__ = "Development"
__date__ = "December 2, 2015"
class AdsorbateSiteFinder:
    """
    This class finds adsorbate sites on slabs and generates
    adsorbate structures according to user-defined criteria.

    The algorithm for finding sites is essentially as follows:
        1. Determine "surface sites" by finding those within
            a height threshold along the miller index of the
            highest site
        2. Create a network of surface sites using the Delaunay
            triangulation of the surface sites
        3. Assign on-top, bridge, and hollow adsorption sites
            at the nodes, edges, and face centers of the Del.
            Triangulation
        4. Generate structures from a molecule positioned at
            these sites
    """
    def __init__(self, slab, selective_dynamics=False, height=0.9, mi_vec=None):
        """
        Create an AdsorbateSiteFinder object.

        Args:
            slab (Slab): slab object for which to find adsorbate sites
            selective_dynamics (bool): flag for whether to assign
                non-surface sites as fixed for selective dynamics
            height (float): height criteria for selection of surface sites
            mi_vec (3-D array-like): vector corresponding to the vector
                concurrent with the miller index, this enables use with
                slabs that have been reoriented, but the miller vector
                must be supplied manually
        """
        # get surface normal from miller index
        # NOTE(review): a multi-element numpy array passed as mi_vec would make
        # ``if mi_vec`` raise (ambiguous truth value); callers appear to pass
        # None or a plain sequence -- confirm.
        if mi_vec:
            self.mvec = mi_vec
        else:
            self.mvec = get_mi_vec(slab)
        slab = self.assign_site_properties(slab, height)
        if selective_dynamics:
            slab = self.assign_selective_dynamics(slab)
        self.slab = slab
    @classmethod
    def from_bulk_and_miller(
        cls,
        structure,
        miller_index,
        min_slab_size=8.0,
        min_vacuum_size=10.0,
        max_normal_search=None,
        center_slab=True,
        selective_dynamics=False,
        undercoord_threshold=0.09,
    ):
        """
        This method constructs the adsorbate site finder from a bulk
        structure and a miller index, which allows the surface sites
        to be determined from the difference in bulk and slab coordination,
        as opposed to the height threshold.

        Args:
            structure (Structure): structure from which slab
                input to the ASF is constructed
            miller_index (3-tuple or list): miller index to be used
            min_slab_size (float): min slab size for slab generation
            min_vacuum_size (float): min vacuum size for slab generation
            max_normal_search (int): max normal search for slab generation
            center_slab (bool): whether to center slab in slab generation
            selective_dynamics (bool): whether to assign surface sites
                to selective dynamics
            undercoord_threshold (float): threshold of "undercoordination"
                to use for the assignment of surface sites; surface sites
                are designated if they are that much less coordinated than
                their bulk counterpart
        """
        # NOTE(review): the signature default is 0.09 (i.e. 9%% undercoordination),
        # not the 0.1 an older docstring claimed -- confirm intended value.
        # TODO: for some reason this works poorly with primitive cells
        # may want to switch the coordination algorithm eventually
        vnn_bulk = VoronoiNN(tol=0.05)
        bulk_coords = [len(vnn_bulk.get_nn(structure, n)) for n in range(len(structure))]
        struct = structure.copy(site_properties={"bulk_coordinations": bulk_coords})
        slabs = generate_all_slabs(
            struct,
            max_index=max(miller_index),
            min_slab_size=min_slab_size,
            min_vacuum_size=min_vacuum_size,
            max_normal_search=max_normal_search,
            center_slab=center_slab,
        )
        slab_dict = {slab.miller_index: slab for slab in slabs}
        if miller_index not in slab_dict:
            raise ValueError("Miller index not in slab dict")
        this_slab = slab_dict[miller_index]
        vnn_surface = VoronoiNN(tol=0.05, allow_pathological=True)
        surf_props, undercoords = [], []
        this_mi_vec = get_mi_vec(this_slab)
        # Projection of each site onto the miller direction; sites above the
        # average projection are candidates for the "top" surface.
        mi_mags = [np.dot(this_mi_vec, site.coords) for site in this_slab]
        average_mi_mag = np.average(mi_mags)
        for n, site in enumerate(this_slab):
            bulk_coord = this_slab.site_properties["bulk_coordinations"][n]
            slab_coord = len(vnn_surface.get_nn(this_slab, n))
            mi_mag = np.dot(this_mi_vec, site.coords)
            # Fractional loss of coordination relative to the bulk reference
            undercoord = (bulk_coord - slab_coord) / bulk_coord
            undercoords += [undercoord]
            if undercoord > undercoord_threshold and mi_mag > average_mi_mag:
                surf_props += ["surface"]
            else:
                surf_props += ["subsurface"]
        new_site_properties = {
            "surface_properties": surf_props,
            "undercoords": undercoords,
        }
        new_slab = this_slab.copy(site_properties=new_site_properties)
        return cls(new_slab, selective_dynamics)
    def find_surface_sites_by_height(self, slab, height=0.9, xy_tol=0.05):
        """
        This method finds surface sites by determining which sites are within
        a threshold value in height from the topmost site in a list of sites

        Args:
            slab (Slab): slab from which to select surface sites
            height (float): threshold in angstroms of distance from topmost
                site in slab along the slab c-vector to include in surface
                site determination
            xy_tol (float): if supplied, will remove any sites which are
                within a certain distance in the miller plane.

        Returns:
            list of sites selected to be within a threshold of the highest
        """
        # Get projection of coordinates along the miller index
        m_projs = np.array([np.dot(site.coords, self.mvec) for site in slab.sites])
        # Mask based on window threshold along the miller index.
        mask = (m_projs - np.amax(m_projs)) >= -height
        surf_sites = [slab.sites[n] for n in np.where(mask)[0]]
        if xy_tol:
            # sort surface sites by height
            surf_sites = [s for (h, s) in zip(m_projs[mask], surf_sites)]
            surf_sites.reverse()
            # Keep only one site per in-plane (perpendicular) position, so
            # stacked columns of atoms contribute a single surface site.
            unique_sites, unique_perp_fracs = [], []
            for site in surf_sites:
                this_perp = site.coords - np.dot(site.coords, self.mvec)
                this_perp_frac = slab.lattice.get_fractional_coords(this_perp)
                if not in_coord_list_pbc(unique_perp_fracs, this_perp_frac):
                    unique_sites.append(site)
                    unique_perp_fracs.append(this_perp_frac)
            surf_sites = unique_sites
        return surf_sites
    def assign_site_properties(self, slab, height=0.9):
        """
        Assigns site properties (tags each site "surface" or "subsurface").
        If the slab already carries a "surface_properties" site property it
        is returned unchanged.
        """
        if "surface_properties" in slab.site_properties.keys():
            return slab
        surf_sites = self.find_surface_sites_by_height(slab, height)
        surf_props = ["surface" if site in surf_sites else "subsurface" for site in slab.sites]
        return slab.copy(site_properties={"surface_properties": surf_props})
    def get_extended_surface_mesh(self, repeat=(5, 5, 1)):
        """
        Gets an extended surface mesh for to use for adsorption
        site finding by constructing supercell of surface sites

        Args:
            repeat (3-tuple): repeat for getting extended surface mesh
        """
        surf_str = Structure.from_sites(self.surface_sites)
        surf_str.make_supercell(repeat)
        return surf_str
    @property
    def surface_sites(self):
        """
        convenience method to return a list of surface sites
        """
        return [site for site in self.slab.sites if site.properties["surface_properties"] == "surface"]
    def subsurface_sites(self):
        """
        convenience method to return list of subsurface sites

        NOTE: unlike ``surface_sites`` this is a plain method, not a
        property -- call it as ``self.subsurface_sites()``.
        """
        return [site for site in self.slab.sites if site.properties["surface_properties"] == "subsurface"]
    def find_adsorption_sites(
        self,
        distance=2.0,
        put_inside=True,
        symm_reduce=1e-2,
        near_reduce=1e-2,
        positions=["ontop", "bridge", "hollow"],
        no_obtuse_hollow=True,
    ):
        """
        Finds surface sites according to the above algorithm. Returns
        a list of corresponding cartesian coordinates.

        Args:
            distance (float): distance from the coordinating ensemble
                of atoms along the miller index for the site (i. e.
                the distance from the slab itself)
            put_inside (bool): whether to put the site inside the cell
            symm_reduce (float): symm reduction threshold
            near_reduce (float): near reduction threshold
            positions (list): which positions to include in the site finding
                "ontop": sites on top of surface sites
                "bridge": sites at edges between surface sites in Delaunay
                    triangulation of surface sites in the miller plane
                "hollow": sites at centers of Delaunay triangulation faces
                "subsurface": subsurface positions projected into miller plane
            no_obtuse_hollow (bool): flag to indicate whether to include
                obtuse triangular ensembles in hollow sites
        """
        # NOTE(review): the mutable default for ``positions`` is shared across
        # calls; it is only iterated here, so this is safe but fragile.
        ads_sites = {k: [] for k in positions}
        if "ontop" in positions:
            ads_sites["ontop"] = [s.coords for s in self.surface_sites]
        if "subsurface" in positions:
            # Get highest site
            ref = self.slab.sites[np.argmax(self.slab.cart_coords[:, 2])]
            # Project diff between highest site and subs site into miller
            ss_sites = [
                self.mvec * np.dot(ref.coords - s.coords, self.mvec) + s.coords for s in self.subsurface_sites()
            ]
            ads_sites["subsurface"] = ss_sites
        if "bridge" in positions or "hollow" in positions:
            mesh = self.get_extended_surface_mesh()
            sop = get_rot(self.slab)
            # Delaunay triangulation is done in 2-D after rotating the
            # miller direction onto z and dropping the z component.
            dt = Delaunay([sop.operate(m.coords)[:2] for m in mesh])
            # TODO: refactor below to properly account for >3-fold
            for v in dt.simplices:
                if -1 not in v:
                    dots = []
                    for i_corner, i_opp in zip(range(3), ((1, 2), (0, 2), (0, 1))):
                        corner, opp = v[i_corner], [v[o] for o in i_opp]
                        vecs = [mesh[d].coords - mesh[corner].coords for d in opp]
                        vecs = [vec / np.linalg.norm(vec) for vec in vecs]
                        dots.append(np.dot(*vecs))
                        # Add bridge sites at midpoints of edges of D. Tri
                        if "bridge" in positions:
                            ads_sites["bridge"].append(self.ensemble_center(mesh, opp))
                    # Prevent addition of hollow sites in obtuse triangles
                    obtuse = no_obtuse_hollow and (np.array(dots) < 1e-5).any()
                    # Add hollow sites at centers of D. Tri faces
                    if "hollow" in positions and not obtuse:
                        ads_sites["hollow"].append(self.ensemble_center(mesh, v))
        for key, sites in ads_sites.items():
            # Pare off outer sites for bridge/hollow
            if key in ["bridge", "hollow"]:
                frac_coords = [self.slab.lattice.get_fractional_coords(ads_site) for ads_site in sites]
                # The (1, 4) window keeps only sites in the interior cells of
                # the 5x5 extended mesh, avoiding edge artifacts.
                frac_coords = [
                    frac_coord
                    for frac_coord in frac_coords
                    if (frac_coord[0] > 1 and frac_coord[0] < 4 and frac_coord[1] > 1 and frac_coord[1] < 4)
                ]
                sites = [self.slab.lattice.get_cartesian_coords(frac_coord) for frac_coord in frac_coords]
            if near_reduce:
                sites = self.near_reduce(sites, threshold=near_reduce)
            if put_inside:
                sites = [put_coord_inside(self.slab.lattice, coord) for coord in sites]
            if symm_reduce:
                sites = self.symm_reduce(sites, threshold=symm_reduce)
            # Lift each site off the coordinating ensemble along the miller vector
            sites = [site + distance * self.mvec for site in sites]
            ads_sites[key] = sites
        ads_sites["all"] = sum(ads_sites.values(), [])
        return ads_sites
    def symm_reduce(self, coords_set, threshold=1e-6):
        """
        Reduces the set of adsorbate sites by finding removing
        symmetrically equivalent duplicates

        Args:
            coords_set: coordinate set in cartesian coordinates
            threshold: tolerance for distance equivalence, used
                as input to in_coord_list_pbc for dupl. checking
        """
        surf_sg = SpacegroupAnalyzer(self.slab, 0.1)
        symm_ops = surf_sg.get_symmetry_operations()
        unique_coords = []
        # Convert to fractional
        coords_set = [self.slab.lattice.get_fractional_coords(coords) for coords in coords_set]
        for coords in coords_set:
            incoord = False
            for op in symm_ops:
                # A site is a duplicate if any symmetry image of it matches
                # an already-accepted site (periodic-boundary aware)
                if in_coord_list_pbc(unique_coords, op.operate(coords), atol=threshold):
                    incoord = True
                    break
            if not incoord:
                unique_coords += [coords]
        # convert back to cartesian
        return [self.slab.lattice.get_cartesian_coords(coords) for coords in unique_coords]
    def near_reduce(self, coords_set, threshold=1e-4):
        """
        Prunes coordinate set for coordinates that are within
        threshold

        Args:
            coords_set (Nx3 array-like): list or array of coordinates
            threshold (float): threshold value for distance
        """
        unique_coords = []
        coords_set = [self.slab.lattice.get_fractional_coords(coords) for coords in coords_set]
        for coord in coords_set:
            if not in_coord_list_pbc(unique_coords, coord, threshold):
                unique_coords += [coord]
        return [self.slab.lattice.get_cartesian_coords(coords) for coords in unique_coords]
    @classmethod
    def ensemble_center(cls, site_list, indices, cartesian=True):
        """
        Finds the center of an ensemble of sites selected from
        a list of sites. Helper method for the find_adsorption_sites
        algorithm.

        Args:
            site_list (list of sites): list of sites
            indices (list of ints): list of ints from which to select
                sites from site list
            cartesian (bool): whether to get average fractional or
                cartesian coordinate
        """
        if cartesian:
            return np.average([site_list[i].coords for i in indices], axis=0)
        return np.average([site_list[i].frac_coords for i in indices], axis=0)
    def add_adsorbate(self, molecule, ads_coord, repeat=None, translate=True, reorient=True):
        """
        Adds an adsorbate at a particular coordinate. Adsorbate
        represented by a Molecule object and is translated to (0, 0, 0) if
        translate is True, or positioned relative to the input adsorbate
        coordinate if translate is False.

        Args:
            molecule (Molecule): molecule object representing the adsorbate
            ads_coord (array): coordinate of adsorbate position
            repeat (3-tuple or list): input for making a supercell of slab
                prior to placing the adsorbate
            translate (bool): flag on whether to translate the molecule so
                that its CoM is at the origin prior to adding it to the surface
            reorient (bool): flag on whether to reorient the molecule to
                have its z-axis concurrent with miller index
        """
        molecule = molecule.copy()
        if translate:
            # Translate the molecule so that the center of mass of the atoms
            # that have the most negative z coordinate is at (0, 0, 0)
            # (the min() is recomputed per site -- O(n^2) but n is tiny)
            front_atoms = molecule.copy()
            front_atoms._sites = [s for s in molecule.sites if s.coords[2] == min(s.coords[2] for s in molecule.sites)]
            x, y, z = front_atoms.center_of_mass
            molecule.translate_sites(vector=[-x, -y, -z])
        if reorient:
            # Reorient the molecule along slab m_index
            sop = get_rot(self.slab)
            molecule.apply_operation(sop.inverse)
        struct = self.slab.copy()
        if repeat:
            struct.make_supercell(repeat)
        # Propagate any site-property bookkeeping the slab carries onto the
        # adsorbate sites so the combined structure stays consistent
        if "surface_properties" in struct.site_properties.keys():
            molecule.add_site_property("surface_properties", ["adsorbate"] * molecule.num_sites)
        if "selective_dynamics" in struct.site_properties.keys():
            molecule.add_site_property("selective_dynamics", [[True, True, True]] * molecule.num_sites)
        for site in molecule:
            struct.append(
                site.specie,
                ads_coord + site.coords,
                coords_are_cartesian=True,
                properties=site.properties,
            )
        return struct
    @classmethod
    def assign_selective_dynamics(cls, slab):
        """
        Helper function to assign selective dynamics site_properties
        based on surface, subsurface site properties

        Args:
            slab (Slab): slab for which to assign selective dynamics
        """
        sd_list = []
        sd_list = [
            [False, False, False] if site.properties["surface_properties"] == "subsurface" else [True, True, True]
            for site in slab.sites
        ]
        new_sp = slab.site_properties
        new_sp["selective_dynamics"] = sd_list
        return slab.copy(site_properties=new_sp)
    def generate_adsorption_structures(
        self,
        molecule,
        repeat=None,
        min_lw=5.0,
        translate=True,
        reorient=True,
        find_args=None,
    ):
        """
        Function that generates all adsorption structures for a given
        molecular adsorbate. Can take repeat argument or minimum
        length/width of precursor slab as an input

        Args:
            molecule (Molecule): molecule corresponding to adsorbate
            repeat (3-tuple or list): repeat argument for supercell generation
            min_lw (float): minimum length and width of the slab, only used
                if repeat is None
            translate (bool): flag on whether to translate the molecule so
                that its CoM is at the origin prior to adding it to the surface
            reorient (bool): flag on whether or not to reorient adsorbate
                along the miller index
            find_args (dict): dictionary of arguments to be passed to the
                call to self.find_adsorption_sites, e.g. {"distance":2.0}
        """
        if repeat is None:
            # Choose the smallest in-plane supercell satisfying min_lw
            xrep = np.ceil(min_lw / np.linalg.norm(self.slab.lattice.matrix[0]))
            yrep = np.ceil(min_lw / np.linalg.norm(self.slab.lattice.matrix[1]))
            repeat = [xrep, yrep, 1]
        structs = []
        find_args = find_args or {}
        for coords in self.find_adsorption_sites(**find_args)["all"]:
            structs.append(
                self.add_adsorbate(
                    molecule,
                    coords,
                    repeat=repeat,
                    translate=translate,
                    reorient=reorient,
                )
            )
        return structs
    def adsorb_both_surfaces(
        self,
        molecule,
        repeat=None,
        min_lw=5.0,
        translate=True,
        reorient=True,
        find_args=None,
    ):
        """
        Function that generates all adsorption structures for a given
        molecular adsorbate on both surfaces of a slab. This is useful
        for calculating surface energy where both surfaces need to be
        equivalent or if we want to calculate nonpolar systems.

        Args:
            molecule (Molecule): molecule corresponding to adsorbate
            repeat (3-tuple or list): repeat argument for supercell generation
            min_lw (float): minimum length and width of the slab, only used
                if repeat is None
            translate (bool): flag on whether to translate the molecule so
                that its CoM is at the origin prior to adding it to the surface
            reorient (bool): flag on whether or not to reorient adsorbate
                along the miller index
            find_args (dict): dictionary of arguments to be passed to the
                call to self.find_adsorption_sites, e.g. {"distance":2.0}
        """
        # Get the adsorbed surfaces first
        find_args = find_args or {}
        adslabs = self.generate_adsorption_structures(
            molecule,
            repeat=repeat,
            min_lw=min_lw,
            translate=translate,
            reorient=reorient,
            find_args=find_args,
        )
        new_adslabs = []
        for adslab in adslabs:
            # Find the adsorbate sites and indices in each slab
            # (the leading ``_`` placeholder is unused)
            _, adsorbates, indices = False, [], []
            for i, site in enumerate(adslab.sites):
                if site.surface_properties == "adsorbate":
                    adsorbates.append(site)
                    indices.append(i)
            # Start with the clean slab
            adslab.remove_sites(indices)
            slab = adslab.copy()
            # For each site, we add it back to the slab along with a
            # symmetrically equivalent position on the other side of
            # the slab using symmetry operations
            for adsorbate in adsorbates:
                p2 = adslab.get_symmetric_site(adsorbate.frac_coords)
                slab.append(adsorbate.specie, p2, properties={"surface_properties": "adsorbate"})
                slab.append(
                    adsorbate.specie,
                    adsorbate.frac_coords,
                    properties={"surface_properties": "adsorbate"},
                )
            new_adslabs.append(slab)
        return new_adslabs
    def generate_substitution_structures(
        self,
        atom,
        target_species=None,
        sub_both_sides=False,
        range_tol=1e-2,
        dist_from_surf=0,
    ):
        """
        Function that performs substitution-type doping on the surface and
        returns all possible configurations where one dopant is substituted
        per surface. Can substitute one surface or both.

        Args:
            atom (str): atom corresponding to substitutional dopant
            sub_both_sides (bool): If true, substitute an equivalent
                site on the other surface
            target_species (list): List of specific species to substitute
            range_tol (float): Find viable substitution sites at a specific
                distance from the surface +- this tolerance
            dist_from_surf (float): Distance from the surface to find viable
                substitution sites, defaults to 0 to substitute at the surface
        """
        target_species = target_species or []
        # Get symmetrized structure in case we want to substitute both sides
        sym_slab = SpacegroupAnalyzer(self.slab).get_symmetrized_structure()
        # Define a function for substituting a site
        def substitute(site, i):
            slab = self.slab.copy()
            # NOTE(review): assumes site_properties returns a fresh dict; if it
            # aliased self.slab's properties the mutation below would leak --
            # confirm against the Structure implementation.
            props = self.slab.site_properties
            if sub_both_sides:
                # Find an equivalent site on the other surface
                eq_indices = [indices for indices in sym_slab.equivalent_indices if i in indices][0]
                for ii in eq_indices:
                    # "other surface" = equivalent site at a different height
                    if f"{sym_slab[ii].frac_coords[2]:.6f}" != f"{site.frac_coords[2]:.6f}":
                        props["surface_properties"][ii] = "substitute"
                        slab.replace(ii, atom)
                        break
            props["surface_properties"][i] = "substitute"
            slab.replace(i, atom)
            slab.add_site_property("surface_properties", props["surface_properties"])
            return slab
        # Get all possible substitution sites
        substituted_slabs = []
        # Sort sites so that we can define a range relative to the position of the
        # surface atoms, i.e. search for sites above (below) the bottom (top) surface
        sorted_sites = sorted(sym_slab, key=lambda site: site.frac_coords[2])
        if sorted_sites[0].surface_properties == "surface":
            d = sorted_sites[0].frac_coords[2] + dist_from_surf
        else:
            d = sorted_sites[-1].frac_coords[2] - dist_from_surf
        for i, site in enumerate(sym_slab):
            if d - range_tol < site.frac_coords[2] < d + range_tol:
                if target_species and site.species_string in target_species:
                    substituted_slabs.append(substitute(site, i))
                elif not target_species:
                    substituted_slabs.append(substitute(site, i))
        # Deduplicate symmetrically equivalent substitutions
        matcher = StructureMatcher()
        return [s[0] for s in matcher.group_structures(substituted_slabs)]
def get_mi_vec(slab):
    """Return the unit vector normal to the slab's a-b plane, i.e. the
    direction associated with the slab's miller index."""
    normal = np.cross(slab.lattice.matrix[0], slab.lattice.matrix[1])
    length = np.linalg.norm(normal)
    return normal / length
def get_rot(slab):
    """Return the SymmOp whose rotation carries the cartesian z axis onto
    the slab's miller-index direction (x is aligned with lattice vector a)."""
    new_z = get_mi_vec(slab)
    a, b, c = slab.lattice.matrix
    new_x = a / np.linalg.norm(a)
    new_y = np.cross(new_z, new_x)
    # The rows of the rotation matrix are the new basis vectors expressed
    # in the cartesian frame (equivalent to dotting each cartesian unit
    # vector against each new basis vector and transposing).
    rot_matrix = np.array([new_x, new_y, new_z])
    return SymmOp.from_rotation_and_translation(rot_matrix)
def put_coord_inside(lattice, cart_coordinate):
    """Map a cartesian coordinate onto its periodic image inside the unit
    cell and return it in cartesian coordinates."""
    frac = lattice.get_fractional_coords(cart_coordinate)
    wrapped = [f - np.floor(f) for f in frac]
    return lattice.get_cartesian_coords(wrapped)
def reorient_z(structure):
    """Return a copy of *structure* rotated so that the normal to its
    a-b plane points along the cartesian z axis."""
    rotated = structure.copy()
    rotated.apply_operation(get_rot(rotated))
    return rotated
# Get color dictionary
# Element colors come from the Jmol scheme in the ``vis`` package's
# ElementColorSchemes.yaml; the 0-255 channel values are scaled into
# [0, 1) floats for matplotlib (dividing by 256.001 keeps 255 strictly
# below 1.0).
colors = loadfn(os.path.join(os.path.dirname(vis.__file__), "ElementColorSchemes.yaml"))
color_dict = {el: [j / 256.001 for j in colors["Jmol"][el]] for el in colors["Jmol"].keys()}
def plot_slab(
    slab,
    ax,
    scale=0.8,
    repeat=5,
    window=1.5,
    draw_unit_cell=True,
    decay=0.2,
    adsorption_sites=True,
    inverse=False,
):
    """
    Function that helps visualize the slab in a 2-D plot, for
    convenient viewing of output of AdsorbateSiteFinder.

    Args:
        slab (slab): Slab object to be visualized
        ax (axes): matplotlib axes with which to visualize
        scale (float): radius scaling for sites
        repeat (int): number of repeating unit cells to visualize
        window (float): window for setting the axes limits, is essentially
            a fraction of the unit cell limits
        draw_unit_cell (bool): flag indicating whether or not to draw cell
        decay (float): how the alpha-value decays along the z-axis
        adsorption_sites (bool): whether to mark the adsorption sites found
            by AdsorbateSiteFinder with "x" markers
        inverse (bool): invert z axis to plot opposite surface

    Returns:
        ax: the matplotlib axes that was drawn on
    """
    orig_slab = slab.copy()
    slab = reorient_z(slab)
    orig_cell = slab.lattice.matrix.copy()
    if repeat:
        slab.make_supercell([repeat, repeat, 1])
    # Sort sites bottom-to-top so that higher sites are drawn last (on top)
    coords = np.array(sorted(slab.cart_coords, key=lambda x: x[2]))
    sites = sorted(slab.sites, key=lambda x: x.coords[2])
    # Fade sites with depth: alpha decreases linearly below the topmost site
    alphas = 1 - decay * (np.max(coords[:, 2]) - coords[:, 2])
    alphas = alphas.clip(min=0)
    corner = [0, 0, slab.lattice.get_fractional_coords(coords[-1])[-1]]
    corner = slab.lattice.get_cartesian_coords(corner)[:2]
    verts = orig_cell[:2, :2]
    lattsum = verts[0] + verts[1]
    # inverse coords, sites, alphas, to show other side of slab
    if inverse:
        # BUG FIX: the previous np.array(reversed(arr)) wrapped the lazy
        # reversed-iterator in a 0-d object array instead of reversing the
        # data, breaking the per-site indexing below; reverse by slicing.
        alphas = alphas[::-1]
        sites = sites[::-1]
        coords = coords[::-1]
    # Draw circles at sites and stack them accordingly
    for n, coord in enumerate(coords):
        r = sites[n].specie.atomic_radius * scale
        # White backing circle hides whatever is underneath, then the
        # colored site circle is drawn on top of it
        ax.add_patch(patches.Circle(coord[:2] - lattsum * (repeat // 2), r, color="w", zorder=2 * n))
        color = color_dict[sites[n].species_string]
        ax.add_patch(
            patches.Circle(
                coord[:2] - lattsum * (repeat // 2),
                r,
                facecolor=color,
                alpha=alphas[n],
                edgecolor="k",
                lw=0.3,
                zorder=2 * n + 1,
            )
        )
    # Adsorption sites
    if adsorption_sites:
        asf = AdsorbateSiteFinder(orig_slab)
        if inverse:
            inverse_slab = orig_slab.copy()
            inverse_slab.make_supercell([1, 1, -1])
            asf = AdsorbateSiteFinder(inverse_slab)
        ads_sites = asf.find_adsorption_sites()["all"]
        sop = get_rot(orig_slab)
        ads_sites = [sop.operate(ads_site)[:2].tolist() for ads_site in ads_sites]
        ax.plot(*zip(*ads_sites), color="k", marker="x", markersize=10, mew=1, linestyle="", zorder=10000)
    # Draw unit cell
    if draw_unit_cell:
        verts = np.insert(verts, 1, lattsum, axis=0).tolist()
        verts += [[0.0, 0.0]]
        verts = [[0.0, 0.0]] + verts
        codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
        verts = [(np.array(vert) + corner).tolist() for vert in verts]
        path = Path(verts, codes)
        # NOTE: ``n`` leaks from the site loop above so the cell outline is
        # drawn above every site; an empty slab would fail earlier anyway.
        patch = patches.PathPatch(path, facecolor="none", lw=2, alpha=0.5, zorder=2 * n + 2)
        ax.add_patch(patch)
    ax.set_aspect("equal")
    center = corner + lattsum / 2.0
    extent = np.max(lattsum)
    lim_array = [center - extent * window, center + extent * window]
    x_lim = [ele[0] for ele in lim_array]
    y_lim = [ele[1] for ele in lim_array]
    ax.set_xlim(x_lim)
    ax.set_ylim(y_lim)
    return ax
|
|
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
# $URI:$
__version__=''' $Id$ '''
__doc__='''Gazillions of miscellaneous internal utility functions'''
import os, sys, imp, time, types
from base64 import decodestring as base64_decodestring, encodestring as base64_encodestring
try:
from cPickle import dumps as pickle_dumps, loads as pickle_loads, dump as pickle_dump, load as pickle_load
except ImportError:
from pickle import dumps as pickle_dumps, loads as pickle_loads, dump as pickle_dump, load as pickle_load
from reportlab import isPy3
from reportlab.lib.logger import warnOnce
from reportlab.lib.rltempfile import get_rl_tempfile, get_rl_tempdir, _rl_getuid
try:
from hashlib import md5
except ImportError:
import md5
def isFunction(v):
    """Return True if *v* is a plain Python function object."""
    return type(v) is type(isFunction)
class c:
    # Throwaway class used only to capture the type of a method object for
    # isMethod's default argument; deleted immediately below.
    def m(self): pass
def isMethod(v,mt=type(c.m)):
    # NOTE(review): on Python 3 ``type(c.m)`` accessed on the *class* is just
    # ``function`` (the py2 unbound-method type no longer exists), so this
    # matches plain functions too -- confirm callers expect that.
    return type(v) == mt
del c
def isModule(v):
    """Return True if *v* is a module object (same type as ``sys``)."""
    return type(v) is type(sys)
def isSeq(v,_st=(tuple,list)):
    """Return True if *v* is a tuple or list (or any of the types passed
    via *_st*)."""
    return isinstance(v, _st)
def isNative(v):
    """Return True if *v* is this interpreter's native ``str`` type."""
    return isinstance(v, str)
#isStr is supposed to be for arbitrary stringType
#isBytes for bytes strings only
#isUnicode for proper unicode
if isPy3:
    # Python 3: native str is unicode, bytes is the binary type. The names
    # below present a uniform API shared with the py2 branch further down.
    _rl_NoneType=type(None)
    bytesT = bytes
    unicodeT = str
    strTypes = (str,bytes)
    def _digester(s):
        # md5 hexdigest of text (utf8-encoded) or bytes input
        return md5(s if isBytes(s) else s.encode('utf8')).hexdigest()
    def asBytes(v,enc='utf8'):
        # Encode str to bytes; bytes pass through unchanged
        return v if isinstance(v,bytes) else v.encode(enc)
    def asUnicode(v,enc='utf8'):
        # Decode bytes to str; str passes through unchanged
        return v if isinstance(v,str) else v.decode(enc)
    def asUnicodeEx(v,enc='utf8'):
        # Like asUnicode, but non-string objects are str()-converted
        return v if isinstance(v,str) else v.decode(enc) if isinstance(v,bytes) else str(v)
    def asNative(v,enc='utf8'):
        return asUnicode(v,enc=enc)
    uniChr = chr
    def int2Byte(i):
        # Single-byte bytes object from an int 0..255
        return bytes([i])
    def isStr(v):
        return isinstance(v, (str,bytes))
    def isBytes(v):
        return isinstance(v, bytes)
    def isUnicode(v):
        return isinstance(v, str)
    def isClass(v):
        return isinstance(v, type)
    def isNonPrimitiveInstance(x):
        # True for instances of user-defined classes, i.e. anything that is
        # not a builtin value type or an interpreter-internal type
        return not isinstance(x,(float,int,type,tuple,list,dict,str,bytes,complex,bool,slice,_rl_NoneType,
            types.FunctionType,types.LambdaType,types.CodeType,
            types.MappingProxyType,types.SimpleNamespace,
            types.GeneratorType,types.MethodType,types.BuiltinFunctionType,
            types.BuiltinMethodType,types.ModuleType,types.TracebackType,
            types.FrameType,types.GetSetDescriptorType,types.MemberDescriptorType))
    def instantiated(v):
        return not isinstance(v,type)
    from string import ascii_letters, ascii_uppercase, ascii_lowercase
    from io import BytesIO, StringIO
    def getBytesIO(buf=None):
        '''unified StringIO instance interface'''
        if buf:
            return BytesIO(buf)
        return BytesIO()
    _bytesIOType = BytesIO
    def getStringIO(buf=None):
        '''unified StringIO instance interface'''
        if buf:
            return StringIO(buf)
        return StringIO()
    def bytestr(x,enc='utf8'):
        # Coerce anything to bytes: encode str, pass bytes, str() the rest
        if isinstance(x,str):
            return x.encode(enc)
        elif isinstance(x,bytes):
            return x
        else:
            return str(x).encode(enc)
    def encode_label(args):
        # Pickle+base64 an object into a latin1 text label
        return base64_encodestring(pickle_dumps(args)).strip().decode('latin1')
    def decode_label(label):
        # Inverse of encode_label. SECURITY: unpickles its input -- must
        # never be fed untrusted data.
        return pickle_loads(base64_decodestring(label.encode('latin1')))
    def rawUnicode(s):
        '''converts first 256 unicodes 1-1'''
        return s.decode('latin1') if not isinstance(s,str) else s
    def rawBytes(s):
        '''converts first 256 unicodes 1-1'''
        return s.encode('latin1') if isinstance(s,str) else s
    import builtins
    # exec is an ordinary builtin function on py3; grab it as rl_exec
    rl_exec = getattr(builtins,'exec')
    del builtins
    def char2int(s):
        # Ordinal of a one-character str/bytes value; ints pass through
        return s if isinstance(s,int) else ord(s if isinstance(s,str) else s.decode('latin1'))
    def rl_reraise(t, v, b=None):
        # Re-raise exception v with traceback b (py3 spelling)
        if v.__traceback__ is not b:
            raise v.with_traceback(b)
        raise v
    def rl_add_builtins(**kwd):
        # Inject names into the builtins namespace (global to the process)
        import builtins
        for k,v in kwd.items():
            setattr(builtins,k,v)
else:
    # Python 2 implementations of the same compatibility API. This branch
    # uses py2-only names (unicode, basestring, unichr, exec-statement).
    bytesT = str
    unicodeT = unicode
    # NOTE(review): the py3 branch sets strTypes to a tuple, here it is a
    # single type -- both work with isinstance but differ if iterated.
    strTypes = basestring
    if sys.hexversion >= 0x02000000:
        def _digester(s):
            return md5(s).hexdigest()
    else:
        # hexdigest not available in 1.5
        def _digester(s):
            return join(["%02x" % ord(x) for x in md5(s).digest()], '')
    def asBytes(v,enc='utf8'):
        return v if isinstance(v,str) else v.encode(enc)
    def asNative(v,enc='utf8'):
        # py2 native string is the byte string
        return asBytes(v,enc=enc)
    def uniChr(v):
        return unichr(v)
    def isStr(v):
        return isinstance(v, basestring)
    def isBytes(v):
        return isinstance(v, str)
    def isUnicode(v):
        return isinstance(v, unicode)
    def asUnicode(v,enc='utf8'):
        return v if isinstance(v,unicode) else v.decode(enc)
    def asUnicodeEx(v,enc='utf8'):
        return v if isinstance(v,unicode) else v.decode(enc) if isinstance(v,str) else unicode(v)
    def isClass(v):
        # old-style and new-style classes
        return isinstance(v,(types.ClassType,type))
    def isNonPrimitiveInstance(x):
        return isinstance(x,types.InstanceType) or not isinstance(x,(float,int,long,type,tuple,list,dict,bool,unicode,str,buffer,complex,slice,types.NoneType,
                types.FunctionType,types.LambdaType,types.CodeType,types.GeneratorType,
                types.ClassType,types.UnboundMethodType,types.MethodType,types.BuiltinFunctionType,
                types.BuiltinMethodType,types.ModuleType,types.FileType,types.XRangeType,
                types.TracebackType,types.FrameType,types.EllipsisType,types.DictProxyType,
                types.NotImplementedType,types.GetSetDescriptorType,types.MemberDescriptorType
                ))
    def instantiated(v):
        return not isinstance(v,type) and hasattr(v,'__class__')
    int2Byte = chr
    from StringIO import StringIO
    def getBytesIO(buf=None):
        '''unified StringIO instance interface'''
        if buf:
            return StringIO(buf)
        return StringIO()
    # py2 str is bytes, so one factory serves both roles
    getStringIO = getBytesIO
    _bytesIOType = StringIO
    def bytestr(x,enc='utf8'):
        if isinstance(x,unicode):
            return x.encode(enc)
        elif isinstance(x,str):
            return x
        else:
            return str(x).encode(enc)
    from string import letters as ascii_letters, uppercase as ascii_uppercase, lowercase as ascii_lowercase
    def encode_label(args):
        return base64_encodestring(pickle_dumps(args)).strip()
    def decode_label(label):
        # SECURITY: unpickles its input -- must never be fed untrusted data.
        return pickle_loads(base64_decodestring(label))
    def rawUnicode(s):
        '''converts first 256 unicodes 1-1'''
        return s.decode('latin1') if not isinstance(s,unicode) else s
    def rawBytes(s):
        '''converts first 256 unicodes 1-1'''
        return s.encode('latin1') if isinstance(s,unicode) else s
    def rl_exec(obj, G=None, L=None):
        # Emulate py3's exec(obj, G, L); defaults to the *caller's* namespaces
        if G is None:
            frame = sys._getframe(1)
            G = frame.f_globals
            if L is None:
                L = frame.f_locals
            del frame
        elif L is None:
            L = G
        exec("""exec obj in G, L""")
    # rl_reraise must be defined via exec because "raise t, v, b" is a
    # syntax error on py3 and would break byte-compilation of this file
    rl_exec("""def rl_reraise(t, v, b=None):\n\traise t, v, b\n""")
    char2int = ord
    def rl_add_builtins(**kwd):
        import __builtin__
        for k,v in kwd.items():
            setattr(__builtin__,k,v)
def zipImported(ldr=None):
    '''Return the zipimporter in effect: *ldr* if supplied, otherwise the
    __loader__ of the calling module. Returns None when not zip-imported
    or when anything goes wrong (the bare except is deliberate best-effort).'''
    try:
        if not ldr:
            # sys._getframe(1) is the caller's frame -- the frame depth is
            # significant; do not wrap this call in another function.
            ldr = sys._getframe(1).f_globals['__loader__']
        from zipimport import zipimporter
        return ldr if isinstance(ldr,zipimporter) else None
    except:
        return None
def _findFiles(dirList,ext='.ttf'):
from os.path import isfile, isdir, join as path_join
from os import listdir
ext = ext.lower()
R = []
A = R.append
for D in dirList:
if not isdir(D): continue
for fn in listdir(D):
fn = path_join(D,fn)
if isfile(fn) and (not ext or fn.lower().endswith(ext)): A(fn)
return R
class CIDict(dict):
    """A dict with case-insensitive keys: every key that supports .lower()
    is lowercased on each read/write/delete; other keys (ints, tuples, ...)
    are used unchanged."""
    def __init__(self,*args,**kwds):
        for mapping in args:
            self.update(mapping)
        self.update(kwds)
    @staticmethod
    def _fold(key):
        # Lowercase when possible; non-string keys pass through untouched.
        try:
            return key.lower()
        except:
            return key
    def update(self,D):
        """Copy every item of mapping *D* into self (keys are folded)."""
        for key, value in D.items():
            self[key] = value
    def __setitem__(self,k,v):
        dict.__setitem__(self, CIDict._fold(k), v)
    def __getitem__(self,k):
        return dict.__getitem__(self, CIDict._fold(k))
    def __delitem__(self,k):
        return dict.__delitem__(self, CIDict._fold(k))
    def get(self,k,dv=None):
        """Like dict.get, but case-insensitive."""
        try:
            return self[k]
        except KeyError:
            return dv
    def __contains__(self,k):
        try:
            self[k]
            return True
        except:
            return False
    def pop(self,k,*a):
        return dict.pop(self, CIDict._fold(k), *a)
    def setdefault(self,k,*a):
        return dict.setdefault(self, CIDict._fold(k), *a)
if os.name == 'mac':
    #with the Mac, we need to tag the file in a special
    #way so the system knows it is a PDF file.
    #This supplied by Joe Strout
    # NOTE(review): this branch targets classic MacOS (py2-era macfs); it
    # is dead code on modern platforms, where os.name is 'posix' or 'nt'.
    import macfs, macostools
    # extension -> (creator code, file type) resource-fork tags
    _KNOWN_MAC_EXT = {
        'BMP' : ('ogle','BMP '),
        'EPS' : ('ogle','EPSF'),
        'EPSF': ('ogle','EPSF'),
        'GIF' : ('ogle','GIFf'),
        'JPG' : ('ogle','JPEG'),
        'JPEG': ('ogle','JPEG'),
        'PCT' : ('ttxt','PICT'),
        'PICT': ('ttxt','PICT'),
        'PNG' : ('ogle','PNGf'),
        'PPM' : ('ogle','.PPM'),
        'TIF' : ('ogle','TIFF'),
        'TIFF': ('ogle','TIFF'),
        'PDF' : ('CARO','PDF '),
        'HTML': ('MSIE','TEXT'),
        }
    def markfilename(filename,creatorcode=None,filetype=None,ext='PDF'):
        # Tag *filename* with creator/type codes; silently a no-op on error.
        # NOTE(review): this variant has an extra ``ext`` parameter that the
        # non-mac stub below lacks -- portable callers cannot pass it.
        try:
            if creatorcode is None or filetype is None and ext is not None:
                try:
                    creatorcode, filetype = _KNOWN_MAC_EXT[ext.upper()]
                except:
                    return
            macfs.FSSpec(filename).SetCreatorType(creatorcode,filetype)
            macostools.touched(filename)
        except:
            pass
else:
    def markfilename(filename,creatorcode=None,filetype=None):
        # No-op on every non-mac platform; exists so callers need no guard.
        pass
import reportlab
# Directory containing the reportlab package; __RL_DIR may be relative
# (depends on how reportlab was imported), _RL_DIR is always absolute.
__RL_DIR=os.path.dirname(reportlab.__file__) #possibly relative
_RL_DIR=os.path.isabs(__RL_DIR) and __RL_DIR or os.path.abspath(__RL_DIR)
del reportlab
#Attempt to detect if this copy of reportlab is running in a
#file system (as opposed to mostly running in a zip or McMillan
#archive or Jar file). This is used by test cases, so that
#we can write test cases that don't get activated in a compiled
#distribution.
try:
    __file__
except:
    # Frozen/embedded interpreters may not define __file__; fall back to
    # the path of the running script.
    __file__ = sys.argv[0]
import glob, fnmatch
# If this module was loaded from a zip archive, __loader__ is a zipimporter
# and we set up archive-aware path helpers; otherwise the except branch
# installs plain filesystem fallbacks. The bare except is deliberate:
# __loader__ may be missing entirely.
try:
    _isFSD = not __loader__
    _archive = os.path.normcase(os.path.normpath(__loader__.archive))
    _archivepfx = _archive + os.sep
    _archivedir = os.path.dirname(_archive)
    _archivedirpfx = _archivedir + os.sep
    _archivepfxlen = len(_archivepfx)
    _archivedirpfxlen = len(_archivedirpfx)
    def __startswith_rl(fn,
            _archivepfx=_archivepfx,
            _archivedirpfx=_archivedirpfx,
            _archive=_archive,
            _archivedir=_archivedir,
            os_path_normpath=os.path.normpath,
            os_path_normcase=os.path.normcase,
            os_getcwd=os.getcwd,
            os_sep=os.sep,
            os_sep_len = len(os.sep)):
        '''if the name starts with a known prefix strip it off'''
        # (module globals are bound as defaults above so lookups are local)
        fn = os_path_normpath(fn.replace('/',os_sep))
        nfn = os_path_normcase(fn)
        if nfn in (_archivedir,_archive): return 1,''
        if nfn.startswith(_archivepfx): return 1,fn[_archivepfxlen:]
        if nfn.startswith(_archivedirpfx): return 1,fn[_archivedirpfxlen:]
        cwd = os_path_normcase(os_getcwd())
        n = len(cwd)
        if nfn.startswith(cwd):
            if fn[n:].startswith(os_sep): return 1, fn[n+os_sep_len:]
            if n==len(fn): return 1,''
        # first element: truthy when fn is relative (archive-resolvable)
        return not os.path.isabs(fn),fn
    def _startswith_rl(fn):
        return __startswith_rl(fn)[1]
    def rl_glob(pattern,glob=glob.glob,fnmatch=fnmatch.fnmatch, _RL_DIR=_RL_DIR,pjoin=os.path.join):
        # Filesystem glob plus matches against the zip archive's file list
        c, pfn = __startswith_rl(pattern)
        r = glob(pfn)
        if c or r==[]:
            r += list(map(lambda x,D=_archivepfx,pjoin=pjoin: pjoin(_archivepfx,x),list(filter(lambda x,pfn=pfn,fnmatch=fnmatch: fnmatch(x,pfn),list(__loader__._files.keys())))))
        return r
except:
    _isFSD = os.path.isfile(__file__) #slight risk of wrong path
    __loader__ = None
    def _startswith_rl(fn):
        # no archive: names are returned unchanged
        return fn
    def rl_glob(pattern,glob=glob.glob):
        return glob(pattern)
del glob, fnmatch
# a source distro additionally has the .py next to this compiled file
_isFSSD = _isFSD and os.path.isfile(os.path.splitext(__file__)[0] +'.py')
def isFileSystemDistro():
    '''return truth if a file system distribution'''
    return _isFSD
def isCompactDistro():
    '''return truth if not a file system distribution'''
    return not _isFSD
def isSourceDistro():
    '''return truth if a source file system distribution'''
    return _isFSSD
def recursiveImport(modulename, baseDir=None, noCWD=0, debug=0):
    """Dynamically imports possible packagized module, or raises ImportError

    baseDir may be a single path or a sequence of paths prepended to the
    module search path for the duration of the import; noCWD removes every
    spelling of the current directory from the search path.  sys.path is
    always restored afterwards; failures are re-raised via annotateException.
    """
    normalize = lambda x: os.path.normcase(os.path.abspath(os.path.normpath(x)))
    path = [normalize(p) for p in sys.path]
    if baseDir:
        if not isSeq(baseDir):
            tp = [baseDir]
        else:
            tp = filter(None,list(baseDir))
        for p in tp:
            p = normalize(p)
            if p not in path: path.insert(0,p)
    if noCWD:
        # strip all spellings of the current directory
        for p in ('','.',normalize('.')):
            while p in path:
                if debug: print('removed "%s" from path' % p)
                path.remove(p)
    elif '.' not in path:
        path.insert(0,'.')
    if debug:
        import pprint
        pp = pprint.pprint
        print('path=')
        pp(path)
    #make import errors a bit more informative
    opath = sys.path
    try:
        try:
            sys.path = path
            NS = {}
            rl_exec('import %s as m' % modulename,NS)
            return NS['m']
        except ImportError:
            sys.path = opath
            msg = "Could not import '%s'" % modulename
            if baseDir:
                msg = msg + " under %s" % baseDir
            annotateException(msg)
        except:
            e = sys.exc_info()
            msg = "Exception raised while importing '%s': %s" % (modulename, e[1])
            annotateException(msg)
    finally:
        sys.path = opath
def recursiveGetAttr(obj, name):
    "Can call down into e.g. object1.object2[4].attr"
    # SECURITY: name is evaluated with eval() against obj's attributes -
    # never pass untrusted input as name.
    return eval(name, obj.__dict__)
def recursiveSetAttr(obj, name, value):
    "Can call down into e.g. object1.object2[4].attr = value"
    # Resolve everything up to the final attribute with recursiveGetAttr,
    # then finish with a plain setattr on that parent object.
    parts = name.split('.')
    if len(parts) == 1:
        setattr(obj, name, value)
        return
    parent = recursiveGetAttr(obj, '.'.join(parts[:-1]))
    setattr(parent, parts[-1], value)
def import_zlib():
    '''Return the zlib module if it can be imported, else warn once (when
    configured to) and return None.'''
    try:
        import zlib
        return zlib
    except ImportError:
        from reportlab.rl_config import ZLIB_WARNINGS
        if ZLIB_WARNINGS:
            warnOnce('zlib not available')
        return None
# Image Capability Detection. Set a flag haveImages
# to tell us if either PIL or Java imaging libraries present.
# define PIL_Image as either None, or an alias for the PIL.Image
# module, as there are 2 ways to import it
# Image Capability Detection: under Jython try the Java imaging classes;
# otherwise try PIL/Pillow under both historical import spellings.  Sets
# haveImages, and (non-Java) binds Image to the imaging module or None.
if sys.platform[0:4] == 'java':
    try:
        import javax.imageio
        import java.awt.image
        haveImages = 1
    except:
        haveImages = 0
else:
    try:
        from PIL import Image
    except ImportError:
        try:
            import Image
        except ImportError:
            Image = None
    haveImages = Image is not None
class ArgvDictValue:
    '''A type to allow clients of getArgvDict to specify a conversion function'''
    def __init__(self,value,func):
        # value: the default value; func: callable applied to the matching
        # sys.argv override string to produce the final value.
        self.value = value
        self.func = func
def getArgvDict(**kw):
    ''' Builds a dictionary from its keyword arguments with overrides from sys.argv.
    Attempts to be smart about conversions, but the value can be an instance
    of ArgDictValue to allow specifying a conversion function.

    Overrides are consumed from sys.argv[1:] in the form key=value; matched
    arguments are removed from the working copy of argv.
    '''
    def handleValue(v,av,func):
        # convert the argv string av to the type of the default v, or via
        # the explicit conversion function when one was supplied
        if func:
            v = func(av)
        else:
            if isStr(v):
                v = av
            elif isinstance(v,float):
                v = float(av)
            elif isinstance(v,int):
                # NOTE(review): bool defaults also satisfy isinstance(...,int)
                # and so are converted with int() - confirm that's intended.
                v = int(av)
            elif isinstance(v,list):
                # SECURITY: eval of a command-line string - only use with
                # trusted argv
                v = list(eval(av))
            elif isinstance(v,tuple):
                v = tuple(eval(av))
            else:
                raise TypeError("Can't convert string %r to %s" % (av,type(v)))
        return v
    A = sys.argv[1:]
    R = {}
    for k, v in kw.items():
        if isinstance(v,ArgvDictValue):
            v, func = v.value, v.func
        else:
            func = None
        handled = 0
        ke = k+'='
        for a in A:
            if a.find(ke)==0:
                av = a[len(ke):]
                A.remove(a)
                R[k] = handleValue(v,av,func)
                handled = 1
                break
        if not handled: R[k] = handleValue(v,v,func)
    return R
def getHyphenater(hDict=None):
    '''Return a pyHnj Hyphen instance for hDict (default: the bundled
    hyphen.mashed dictionary), or None when the optional pyHnj extension is
    not installed.

    ImportErrors other than the absence of pyHnj itself are re-raised.
    '''
    try:
        from reportlab.lib.pyHnj import Hyphen
        if hDict is None: hDict=os.path.join(os.path.dirname(__file__),'hyphen.mashed')
        return Hyphen(hDict)
    except ImportError as errMsg:
        # bug fix: the old exact comparison str(errMsg)!='No module named pyHnj'
        # only matched Python 2's message; Python 3 renders
        # "No module named 'reportlab.lib.pyHnj'", so a merely-missing pyHnj
        # was wrongly re-raised.  Accept any message naming pyHnj.
        if 'pyHnj' not in str(errMsg): raise
        return None
def _className(self):
'''Return a shortened class name'''
try:
name = self.__class__.__name__
i=name.rfind('.')
if i>=0: return name[i+1:]
return name
except AttributeError:
return str(self)
def open_for_read_by_name(name,mode='b'):
    '''open name for reading; falls back to the zip archive on compact distros

    mode is the open() mode without the leading 'r' (e.g. 'b' or 't'), which
    is prepended here if missing.
    '''
    if 'r' not in mode: mode = 'r'+mode
    try:
        return open(name,mode)
    except IOError:
        if _isFSD or __loader__ is None: raise
        #we have a __loader__, perhaps the filename starts with
        #the dirname(reportlab.__file__) or is relative
        name = _startswith_rl(name)
        s = __loader__.get_data(name)
        # NOTE(review): s is bytes but os.linesep is str on Python 3, so this
        # replace looks like it would fail on Windows text-mode reads - confirm.
        if 'b' not in mode and os.linesep!='\n': s = s.replace(os.linesep,'\n')
        return getBytesIO(s)
# URL helpers: urlopen plus a datareader() that understands RFC 2397
# "data:" URLs; the Python 2 and Python 3 spellings differ.
if not isPy3:
    import urllib2, urllib
    urlopen=urllib2.urlopen
    def datareader(url,opener=urllib.URLopener().open):
        return opener(url).read()
    del urllib, urllib2
else:
    from urllib.request import urlopen
    from urllib.parse import unquote
    import base64
    #copied here from urllib.URLopener.open_data because
    # 1) they want to remove it
    # 2) the existing one is broken
    def datareader(url, unquote=unquote, decodebytes=base64.decodebytes):
        """Use "data" URL.

        Returns the decoded payload bytes of a data: URL (the "data:" scheme
        prefix has already been stripped by the caller, open_for_read).
        """
        # ignore POSTed data
        #
        # syntax of data URLs:
        # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
        # mediatype := [ type "/" subtype ] *( ";" parameter )
        # data := *urlchar
        # parameter := attribute "=" value
        try:
            typ, data = url.split(',', 1)
        except ValueError:
            raise IOError('data error', 'bad data URL')
        if not typ:
            typ = 'text/plain;charset=US-ASCII'
        semi = typ.rfind(';')
        if semi >= 0 and '=' not in typ[semi:]:
            encoding = typ[semi+1:]
            typ = typ[:semi]
        else:
            encoding = ''
        if encoding == 'base64':
            # XXX is this encoding/decoding ok?
            data = decodebytes(data.encode('ascii'))
        else:
            data = unquote(data).encode('latin-1')
        return data
    del unquote, base64
def open_for_read(name,mode='b', urlopen=urlopen, datareader=datareader):
    '''attempt to open a file or URL for reading

    name may be an already-open file-like object (returned unchanged), a
    file path, a data: URL or a remote URL.  Raises IOError when nothing
    works; the bare excepts deliberately make each step best-effort.
    '''
    if hasattr(name,'read'): return name
    try:
        return open_for_read_by_name(name,mode)
    except:
        try:
            return getBytesIO(datareader(name) if name.startswith('data:') else urlopen(name).read())
        except:
            raise IOError('Cannot open resource "%s"' % name)
del urlopen, datareader
def open_and_read(name, mode='b'):
    '''Open name (path, URL or file-like object) and return its full contents.'''
    stream = open_for_read(name, mode)
    return stream.read()
def open_and_readlines(name, mode='t'):
    '''Return the contents of name as a list of lines (split on newline).'''
    text = open_and_read(name, mode)
    return text.split('\n')
def rl_isfile(fn, os_path_isfile=os.path.isfile):
    '''True if fn is file-like, a real file, or a member of the zip archive.'''
    if hasattr(fn, 'read'):
        return True
    if os_path_isfile(fn):
        return True
    if _isFSD or __loader__ is None:
        return False
    return _startswith_rl(fn) in list(__loader__._files.keys())
def rl_isdir(pn, os_path_isdir=os.path.isdir, os_path_normpath=os.path.normpath):
    '''True if pn is a real directory or a directory inside the zip archive.'''
    if os_path_isdir(pn):
        return True
    if _isFSD or __loader__ is None:
        return False
    prefix = _startswith_rl(os_path_normpath(pn))
    if not prefix.endswith(os.sep):
        prefix += os.sep
    return any(member.startswith(prefix) for member in __loader__._files.keys())
def rl_listdir(pn, os_path_isdir=os.path.isdir, os_path_normpath=os.path.normpath, os_listdir=os.listdir):
    '''os.listdir replacement that also works for directories in the zip archive.'''
    if os_path_isdir(pn) or _isFSD or __loader__ is None:
        return os_listdir(pn)
    prefix = _startswith_rl(os_path_normpath(pn))
    if not prefix.endswith(os.sep):
        prefix += os.sep
    strip = len(prefix)
    return [member[strip:] for member in __loader__._files.keys() if member.startswith(prefix)]
def rl_getmtime(pn,os_path_isfile=os.path.isfile,os_path_normpath=os.path.normpath,os_path_getmtime=os.path.getmtime,time_mktime=time.mktime):
    '''os.path.getmtime replacement that also works for files in the zip archive'''
    if os_path_isfile(pn) or _isFSD or __loader__ is None: return os_path_getmtime(pn)
    p = _startswith_rl(os_path_normpath(pn))
    try:
        e = __loader__._files[p]
    except KeyError:
        return os_path_getmtime(pn)
    # e[5]/e[6] are the archive member's DOS time and date words; unpack the
    # bitfields (date: year-1980/month/day, time: hours/minutes/2-second
    # units) and convert to a local epoch timestamp.
    s = e[5]
    d = e[6]
    return time_mktime((((d>>9)&0x7f)+1980,(d>>5)&0xf,d&0x1f,(s>>11)&0x1f,(s>>5)&0x3f,(s&0x1f)<<1,0,0,0))
def rl_get_module(name,dir):
    '''import and return module name from directory dir, temporarily hiding
    any already-imported module of the same name; falls back to loading from
    the zip archive on compact distros.'''
    # NOTE(review): relies on the deprecated 'imp' module (removed in
    # Python 3.12); a port would need importlib equivalents - confirm the
    # supported interpreter range.
    if name in sys.modules:
        om = sys.modules[name]
        del sys.modules[name]
    else:
        om = None
    try:
        f = None
        try:
            f, p, desc= imp.find_module(name,[dir])
            return imp.load_module(name,f,p,desc)
        except:
            if isCompactDistro():
                #attempt a load from inside the zip archive
                import zipimport
                dir = _startswith_rl(dir)
                dir = (dir=='.' or not dir) and _archive or os.path.join(_archive,dir.replace('/',os.sep))
                zi = zipimport.zipimporter(dir)
                return zi.load_module(name)
            raise ImportError('%s[%s]' % (name,dir))
    finally:
        # restore the previously imported module and close any open file
        if om: sys.modules[name] = om
        del om
        if f: f.close()
def _isPILImage(im):
    '''True when im is a PIL/Pillow Image instance; 0 when no usable
    imaging module is available (Image is None).'''
    try:
        image_cls = Image.Image
    except AttributeError:
        return 0
    return isinstance(im, image_cls)
class ImageReader(object):
    "Wraps up either PIL or Java to get data from bitmaps"
    # class-level cache of interned image data, used when imageReaderFlags&4
    _cache={}
    def __init__(self, fileName,ident=None):
        if isinstance(fileName,ImageReader):
            self.__dict__ = fileName.__dict__   #borgize
            return
        self._ident = ident
        #start with lots of null private fields, to be populated by
        #the relevant engine.
        self.fileName = fileName
        self._image = None
        self._width = None
        self._height = None
        self._transparent = None
        self._data = None
        if _isPILImage(fileName):
            self._image = fileName
            self.fp = getattr(fileName,'fp',None)
            try:
                self.fileName = self._image.fileName
            except AttributeError:
                self.fileName = 'PILIMAGE_%d' % id(self)
        else:
            try:
                from reportlab.rl_config import imageReaderFlags
                self.fp = open_for_read(fileName,'b')
                if isinstance(self.fp,_bytesIOType): imageReaderFlags=0 #avoid messing with already internal files
                if imageReaderFlags>0:  #interning
                    data = self.fp.read()
                    if imageReaderFlags&2:  #autoclose
                        try:
                            self.fp.close()
                        except:
                            pass
                    if imageReaderFlags&4:  #cache the data
                        if not self._cache:
                            # NOTE(review): bare 'rl_config' import is
                            # inconsistent with 'reportlab.rl_config' used
                            # above - confirm it resolves in this distro.
                            from rl_config import register_reset
                            register_reset(self._cache.clear)
                        data=self._cache.setdefault(_digester(data),data)
                    self.fp=getBytesIO(data)
                elif imageReaderFlags==-1 and isinstance(fileName,str):
                    #try Ralf Schmitt's re-opening technique of avoiding too many open files
                    self.fp.close()
                    del self.fp #will become a property in the next statement
                    self.__class__=LazyImageReader
                if haveImages:
                    #detect which library we are using and open the image
                    if not self._image:
                        self._image = self._read_image(self.fp)
                    if getattr(self._image,'format',None)=='JPEG': self.jpeg_fh = self._jpeg_fh
                else:
                    # no imaging library: only raw JPEGs can be handled
                    from reportlab.pdfbase.pdfutils import readJPEGInfo
                    try:
                        self._width,self._height,c=readJPEGInfo(self.fp)
                    except:
                        annotateException('\nImaging Library not available, unable to import bitmaps only jpegs\nfileName=%r identity=%s'%(fileName,self.identity()))
                    self.jpeg_fh = self._jpeg_fh
                    self._data = self.fp.read()
                    self._dataA=None
                    self.fp.seek(0)
            except:
                annotateException('\nfileName=%r identity=%s'%(fileName,self.identity()))
    def identity(self):
        '''try to return information that will identify the instance'''
        fn = self.fileName
        if not isStr(fn):
            fn = getattr(getattr(self,'fp',None),'name',None)
        ident = self._ident
        return '[%s@%s%s%s]' % (self.__class__.__name__,hex(id(self)),ident and (' ident=%r' % ident) or '',fn and (' filename=%r' % fn) or '')
    def _read_image(self,fp):
        # dispatch to the backend detected at import time
        if sys.platform[0:4] == 'java':
            from javax.imageio import ImageIO
            return ImageIO.read(fp)
        else:
            return Image.open(fp)
    def _jpeg_fh(self):
        # installed as jpeg_fh when the bitmap is a directly-embeddable JPEG;
        # returns the rewound file handle
        fp = self.fp
        fp.seek(0)
        return fp
    def jpeg_fh(self):
        # default: not a directly-embeddable JPEG
        return None
    def getSize(self):
        # lazily pull the dimensions from the underlying image
        if (self._width is None or self._height is None):
            if sys.platform[0:4] == 'java':
                self._width = self._image.getWidth()
                self._height = self._image.getHeight()
            else:
                self._width, self._height = self._image.size
        return (self._width, self._height)
    def getRGBData(self):
        "Return byte array of RGB data as string"
        try:
            if self._data is None:
                self._dataA = None
                if sys.platform[0:4] == 'java':
                    import jarray
                    from java.awt.image import PixelGrabber
                    width, height = self.getSize()
                    buffer = jarray.zeros(width*height, 'i')
                    pg = PixelGrabber(self._image, 0,0,width,height,buffer,0,width)
                    pg.grabPixels()
                    # there must be a way to do this with a cast not a byte-level loop,
                    # I just haven't found it yet...
                    pixels = []
                    a = pixels.append
                    for i in range(len(buffer)):
                        rgb = buffer[i]
                        a(chr((rgb>>16)&0xff))
                        a(chr((rgb>>8)&0xff))
                        a(chr(rgb&0xff))
                    self._data = ''.join(pixels)
                    self.mode = 'RGB'
                else:
                    im = self._image
                    mode = self.mode = im.mode
                    if mode=='RGBA':
                        if Image.VERSION.startswith('1.1.7'): im.load()
                        # the alpha channel is split out into _dataA
                        self._dataA = ImageReader(im.split()[3])
                        im = im.convert('RGB')
                        self.mode = 'RGB'
                    elif mode not in ('L','RGB','CMYK'):
                        if im.format=='PNG' and im.mode=='P' and 'transparency' in im.info:
                            im = im.convert('RGBA')
                            self._dataA = ImageReader(im.split()[3])
                            im = im.convert('RGB')
                        else:
                            im = im.convert('RGB')
                        self.mode = 'RGB'
                    self._data = (im.tobytes if hasattr(im, 'tobytes') else im.tostring)() #make pillow and PIL both happy, for now
            return self._data
        except:
            annotateException('\nidentity=%s'%self.identity())
    def getImageData(self):
        width, height = self.getSize()
        return width, height, self.getRGBData()
    def getTransparent(self):
        # return the palette RGB triple marked as transparent, if any
        if sys.platform[0:4] == 'java':
            return None
        else:
            if "transparency" in self._image.info:
                transparency = self._image.info["transparency"] * 3
                palette = self._image.palette
                try:
                    palette = palette.palette
                except:
                    try:
                        palette = palette.data
                    except:
                        return None
                if isPy3:
                    return palette[transparency:transparency+3]
                else:
                    return [ord(c) for c in palette[transparency:transparency+3]]
            else:
                return None
class LazyImageReader(ImageReader):
    '''ImageReader variant that re-opens its file and re-reads its image on
    every access, so the underlying file handle is not held open.'''

    @property
    def fp(self):
        # reopen the underlying file each time it is needed
        return open_for_read(self.fileName, 'b')

    @property
    def _image(self):
        return self._read_image(self.fp)
def getImageData(imageFileName):
    "Get width, height and RGB pixels from image file. Wraps Java/PIL"
    # bug fix: only the attribute lookup is guarded now.  Previously the
    # whole call sat inside try/except AttributeError, so an AttributeError
    # raised *inside* a real getImageData() implementation was silently
    # swallowed and decoding was restarted via ImageReader.
    try:
        getter = imageFileName.getImageData
    except AttributeError:
        return ImageReader(imageFileName).getImageData()
    return getter()
class DebugMemo:
    '''Intended as a simple report back encapsulator

    Typical usages:

    1. To record error data::

        dbg = DebugMemo(fn='dbgmemo.dbg',myVar=value)
        dbg.add(anotherPayload='aaaa',andagain='bbb')
        dbg.dump()

    2. To show the recorded info::

        dbg = DebugMemo(fn='dbgmemo.dbg',mode='r')
        dbg.load()
        dbg.show()

    3. To re-use recorded information::

        dbg = DebugMemo(fn='dbgmemo.dbg',mode='r')
        dbg.load()
        myTestFunc(dbg.payload('myVar'),dbg.payload('andagain'))

    In addition to the payload variables the dump records many useful bits
    of information which are also printed in the show() method.
    '''
    def __init__(self,fn='rl_dbgmemo.dbg',mode='w',getScript=1,modules=(),capture_traceback=1, stdout=None, **kw):
        import time, socket
        self.fn = fn
        if not stdout:
            self.stdout = sys.stdout
        else:
            if hasattr(stdout,'write'):
                self.stdout = stdout
            else:
                self.stdout = open(stdout,'w')
        # read mode: nothing to collect, load() will populate self.store
        if mode!='w': return
        self.store = store = {}
        # capture the active traceback, if called from inside an except block
        if capture_traceback and sys.exc_info() != (None,None,None):
            import traceback
            s = getBytesIO()
            traceback.print_exc(None,s)
            store['__traceback'] = s.getvalue()
        cwd=os.getcwd()
        lcwd = os.listdir(cwd)
        pcwd = os.path.dirname(cwd)
        lpcwd = pcwd and os.listdir(pcwd) or '???'
        exed = os.path.abspath(os.path.dirname(sys.argv[0]))
        project_version='???'
        md=None
        try:
            import marshal
            md=marshal.loads(__loader__.get_data('meta_data.mar'))
            project_version=md['project_version']
        except:
            pass
        env = os.environ
        K=list(env.keys())
        K.sort()
        # snapshot of interpreter, host and environment details
        # NOTE(review): the 'maxint' entry actually records sys.maxunicode -
        # confirm whether the key name is historical.
        store.update({ 'gmt': time.asctime(time.gmtime(time.time())),
                        'platform': sys.platform,
                        'version': sys.version,
                        'hexversion': hex(sys.hexversion),
                        'executable': sys.executable,
                        'exec_prefix': sys.exec_prefix,
                        'prefix': sys.prefix,
                        'path': sys.path,
                        'argv': sys.argv,
                        'cwd': cwd,
                        'hostname': socket.gethostname(),
                        'lcwd': lcwd,
                        'lpcwd': lpcwd,
                        'byteorder': sys.byteorder,
                        'maxint': getattr(sys,'maxunicode','????'),
                        'api_version': getattr(sys,'api_version','????'),
                        'version_info': getattr(sys,'version_info','????'),
                        'winver': getattr(sys,'winver','????'),
                        'environment': '\n\t\t\t'.join(['']+['%s=%r' % (k,env[k]) for k in K]),
                        '__loader__': repr(__loader__),
                        'project_meta_data': md,
                        'project_version': project_version,
                        })
        # optional platform-specific probes; each is best-effort
        for M,A in (
                (sys,('getwindowsversion','getfilesystemencoding')),
                (os,('uname', 'ctermid', 'getgid', 'getuid', 'getegid',
                    'geteuid', 'getlogin', 'getgroups', 'getpgrp', 'getpid', 'getppid',
                    )),
                ):
            for a in A:
                if hasattr(M,a):
                    try:
                        store[a] = getattr(M,a)()
                    except:
                        pass
        if exed!=cwd:
            try:
                store.update({'exed': exed, 'lexed': os.listdir(exed),})
            except:
                pass
        if getScript:
            # record the running script's source when it is a real file
            fn = os.path.abspath(sys.argv[0])
            if os.path.isfile(fn):
                try:
                    store['__script'] = (fn,open(fn,'r').read())
                except:
                    pass
        module_versions = {}
        for n,m in sys.modules.items():
            if n=='reportlab' or n=='rlextra' or n[:10]=='reportlab.' or n[:8]=='rlextra.':
                v = [getattr(m,x,None) for x in ('__version__','__path__','__file__')]
                if [_f for _f in v if _f]:
                    v = [v[0]] + [_f for _f in v[1:] if _f]
                    module_versions[n] = tuple(v)
        store['__module_versions'] = module_versions
        self.store['__payload'] = {}
        self._add(kw)
    def _add(self,D):
        # merge D into the payload dict
        payload = self.store['__payload']
        for k, v in D.items():
            payload[k] = v
    def add(self,**kw):
        self._add(kw)
    def _dump(self,f):
        # pickle the whole store; on failure retry entry-by-entry, replacing
        # unpicklable values with a marker string
        try:
            pos=f.tell()
            pickle_dump(self.store,f)
        except:
            S=self.store.copy()
            ff=getBytesIO()
            for k,v in S.items():
                try:
                    pickle_dump({k:v},ff)
                except:
                    S[k] = '<unpicklable object %r>' % v
            f.seek(pos,0)
            pickle_dump(S,f)
    def dump(self):
        f = open(self.fn,'wb')
        try:
            self._dump(f)
        finally:
            f.close()
    def dumps(self):
        f = getBytesIO()
        self._dump(f)
        return f.getvalue()
    def _load(self,f):
        self.store = pickle_load(f)
    def load(self):
        f = open(self.fn,'rb')
        try:
            self._load(f)
        finally:
            f.close()
    def loads(self,s):
        self._load(getBytesIO(s))
    def _show_module_versions(self,k,v):
        # compare recorded module versions against the ones importable now
        self._writeln(k[2:])
        K = list(v.keys())
        K.sort()
        for k in K:
            vk = vk0 = v[k]
            if isinstance(vk,tuple): vk0 = vk[0]
            try:
                __import__(k)
                m = sys.modules[k]
                d = getattr(m,'__version__',None)==vk0 and 'SAME' or 'DIFFERENT'
            except:
                m = None
                d = '??????unknown??????'
            self._writeln(' %s = %s (%s)' % (k,vk,d))
    def _banner(self,k,what):
        self._writeln('###################%s %s##################' % (what,k[2:]))
    def _start(self,k):
        self._banner(k,'Start ')
    def _finish(self,k):
        self._banner(k,'Finish ')
    def _show_lines(self,k,v):
        self._start(k)
        self._writeln(v)
        self._finish(k)
    def _show_file(self,k,v):
        # v is a (filename, source_text) pair
        k = '%s %s' % (k,os.path.basename(v[0]))
        self._show_lines(k,v[1])
    def _show_payload(self,k,v):
        if v:
            import pprint
            self._start(k)
            pprint.pprint(v,self.stdout)
            self._finish(k)
    def _show_extensions(self):
        # probe the known binary extensions; A is the bound append of a list
        # seeded with the module name, printed at the end
        for mn in ('_rl_accel','_renderPM','sgmlop','pyRXP','pyRXPU','_imaging','Image'):
            try:
                A = [mn].append
                __import__(mn)
                m = sys.modules[mn]
                A(m.__file__)
                for vn in ('__version__','VERSION','_version','version'):
                    if hasattr(m,vn):
                        A('%s=%r' % (vn,getattr(m,vn)))
            except:
                A('not found')
            self._writeln(' '+' '.join(A.__self__))
    # store keys with special display handlers in show()
    specials = {'__module_versions': _show_module_versions,
                '__payload': _show_payload,
                '__traceback': _show_lines,
                '__script': _show_file,
                }
    def show(self):
        K = list(self.store.keys())
        K.sort()
        for k in K:
            if k not in list(self.specials.keys()): self._writeln('%-15s = %s' % (k,self.store[k]))
        for k in K:
            if k in list(self.specials.keys()): self.specials[k](self,k,self.store[k])
        self._show_extensions()
    def payload(self,name):
        return self.store['__payload'][name]
    def __setitem__(self,name,value):
        self.store['__payload'][name] = value
    def __getitem__(self,name):
        return self.store['__payload'][name]
    def _writeln(self,msg):
        self.stdout.write(msg+'\n')
def _flatten(L, a):
    '''Recursively feed every non-sequence leaf of L to a (an append callable).'''
    for element in L:
        if isSeq(element):
            _flatten(element, a)
        else:
            a(element)
def flatten(L):
    '''recursively flatten the list or tuple L'''
    out = []
    _flatten(L, out.append)
    return out
def find_locals(func,depth=0):
    '''apply func to the locals at each stack frame till func returns a non false value

    Walks outward starting at sys._getframe(depth) (depth=0 is this
    function's own frame); sys._getframe raises ValueError once depth
    exceeds the stack, so a never-satisfied func terminates that way.
    '''
    while 1:
        _ = func(sys._getframe(depth).f_locals)
        if _: return _
        depth += 1
class _FmtSelfDict:
def __init__(self,obj,overrideArgs):
self.obj = obj
self._overrideArgs = overrideArgs
def __getitem__(self,k):
try:
return self._overrideArgs[k]
except KeyError:
try:
return self.obj.__dict__[k]
except KeyError:
return getattr(self.obj,k)
class FmtSelfDict:
    '''mixin to provide the _fmt method'''
    def _fmt(self, fmt, **overrideArgs):
        # '%(name)s' lookups fall back from overrides to instance attributes
        return fmt % _FmtSelfDict(self, overrideArgs)
def _simpleSplit(txt,mW,SW):
L = []
ws = SW(' ')
O = []
w = -ws
for t in txt.split():
lt = SW(t)
if w+ws+lt<=mW or O==[]:
O.append(t)
w = w + ws + lt
else:
L.append(' '.join(O))
O = [t]
w = lt
if O!=[]: L.append(' '.join(O))
return L
def simpleSplit(text,fontName,fontSize,maxWidth):
    '''Split text into lines fitting maxWidth at fontName/fontSize.

    Splits on newlines first; each line is then word-wrapped with
    _simpleSplit using pdfmetrics.stringWidth as the measure.  If maxWidth
    is falsy the newline-split lines are returned unwrapped.
    '''
    from reportlab.pdfbase.pdfmetrics import stringWidth
    lines = asUnicode(text).split(u'\n')
    SW = lambda text, fN=fontName, fS=fontSize: stringWidth(text, fN, fS)
    if maxWidth:
        L = []
        for l in lines:
            # NOTE(review): L[-1:-1] inserts *before* the last element, which
            # appears to scramble line order for multi-line input; confirm
            # whether L.extend(...) was intended.
            L[-1:-1] = _simpleSplit(l,maxWidth,SW)
        lines = L
    return lines
def escapeTextOnce(text):
    """Escapes once only: XML-escape text, then collapse any doubled
    escaping so already-escaped input comes out escaped exactly once.
    None passes through unchanged."""
    from xml.sax.saxutils import escape
    if text is None:
        return text
    # bug fix: the decoded value was previously assigned to an unused local
    # 's', so bytes input reached escape() undecoded.
    if isBytes(text): text = text.decode('utf8')
    text = escape(text)
    # bug fix: the replace targets had been collapsed (e.g. u'&amp;'->u'&'),
    # which *un*-escaped the text; restore collapsing of double escapes only.
    text = text.replace(u'&amp;amp;', u'&amp;')
    text = text.replace(u'&amp;gt;', u'&gt;')
    text = text.replace(u'&amp;lt;', u'&lt;')
    return text
if isPy3:
    def fileName2FSEnc(fn):
        '''attempt to convert a filename to unicode using the configured
        filesystem encodings'''
        # bug fix: fsEncodings was referenced without being imported in this
        # branch (NameError for bytes input); import it locally as the
        # Python 2 branch below does.
        from reportlab.rl_config import fsEncodings
        if isUnicode(fn):
            return fn
        for enc in fsEncodings:
            try:
                return fn.decode(enc)
            except:
                pass
        raise ValueError('cannot convert %r to filesystem encoding' % fn)
else:
    def fileName2FSEnc(fn):
        '''attempt to convert a filename to utf8'''
        from reportlab.rl_config import fsEncodings
        if isUnicode(fn):
            return asBytes(fn)
        for enc in fsEncodings:
            try:
                return fn.decode(enc).encode('utf8')
            except:
                pass
        raise ValueError('cannot convert %r to utf8 for file path name' % fn)
import itertools
def prev_this_next(items):
    """
    Loop over a collection with look-ahead and look-back.

    From Thomas Guest,
    http://wordaligned.org/articles/zippy-triples-served-with-python

    Seriously useful looping tool (Google "zippy triples")
    lets you loop a collection and see the previous and next items,
    which get set to None at the ends.

    To be used in layout algorithms where one wants a peek at the
    next item coming down the pipe.
    """
    extended = itertools.chain([None], items, [None])
    prevs, currents, nexts = itertools.tee(extended, 3)
    # bug fix: the third tee iterator used to be bound to the name 'next',
    # shadowing the builtin, so next(this) raised
    # "TypeError: 'itertools._tee' object is not callable".
    # Advance 'currents' by one and 'nexts' by two; zip then stops when the
    # shortest iterator (nexts) is exhausted, producing the triples.
    next(currents, None)
    next(nexts, None)
    next(nexts, None)
    return zip(prevs, currents, nexts)
def commasplit(s):
    '''
    Splits the string s at every unescaped comma and returns the result as a list.
    To escape a comma, double it. Individual items are stripped.
    To avoid the ambiguity of 3 successive commas to denote a comma at the beginning
    or end of an item, add a space between the item separator and the escaped comma.

    >>> commasplit(u'a,b,c') == [u'a', u'b', u'c']
    True
    >>> commasplit('a,, , b , c ') == [u'a,', u'b', u'c']
    True
    >>> commasplit(u'a, ,,b, c') == [u'a', u',b', u'c']
    True
    '''
    if isBytes(s): s = s.decode('utf8')
    n = len(s)-1
    # sentinel space lets the loop always look one character ahead
    s += u' '
    i = 0
    r=[u'']
    while i<=n:
        if s[i]==u',':
            if s[i+1]==u',':
                # doubled comma -> literal comma inside the current item
                r[-1]+=u','
                i += 1
            else:
                # item separator: strip the finished item, start a new one
                r[-1] = r[-1].strip()
                if i!=n: r.append(u'')
        else:
            r[-1] += s[i]
        i+=1
    r[-1] = r[-1].strip()
    return r
def commajoin(l):
    '''
    Inverse of commasplit, except that whitespace around items is not conserved.
    Adds more whitespace than needed for simplicity and performance.

    >>> commasplit(commajoin(['a', 'b', 'c'])) == [u'a', u'b', u'c']
    True
    >>> commasplit(commajoin([u'a,', u' b ', u'c'])) == [u'a,', u'b', u'c']
    True
    >>> commasplit(commajoin([u'a ', u',b', u'c'])) == [u'a', u',b', u'c']
    True
    '''
    return u','.join([ u' ' + asUnicode(i).replace(u',', u',,') + u' ' for i in l ])
def findInPaths(fn, paths, isfile=True, fail=False):
    '''search for relative files in likely places

    Returns fn itself when it exists (or when not found and fail is False);
    otherwise the first paths entry under which it exists.  With fail=True
    a ValueError is raised when nothing matches.
    '''
    exists = os.path.isfile if isfile else os.path.isdir
    if exists(fn):
        return fn
    if not os.path.isabs(fn):
        for base in paths:
            candidate = os.path.join(base, fn)
            if exists(candidate):
                return candidate
    if fail:
        raise ValueError('cannot locate %r with paths=%r' % (fn, paths))
    return fn
def annotateException(msg,enc='utf8'):
    '''add msg to the args of an existing exception and re-raise it

    Must be called from inside an except block.  msg is appended to the
    first str argument of the active exception (or prepended to
    IOError.strerror) and the exception is re-raised with its original
    traceback via rl_reraise.
    '''
    if not msg: raise
    t,v,b=sys.exc_info()
    if not hasattr(v,'args'): raise
    e = -1
    A = list(v.args)
    for i,a in enumerate(A):
        if isinstance(a,str):
            e = i
            break
    if e>=0:
        if not isPy3:
            # keep bytes/unicode consistent with the argument being extended
            if isUnicode(a):
                if not isUnicode(msg):
                    msg=msg.decode(enc)
            else:
                if isUnicode(msg):
                    msg=msg.encode(enc)
        else:
            msg = str(msg)
        if isinstance(v,IOError) and getattr(v,'strerror',None):
            v.strerror = msg+'\n'+str(v.strerror)
        else:
            A[e] += msg
    else:
        # no string argument found: just append the message
        A.append(msg)
    v.args = tuple(A)
    rl_reraise(t,v,b)
def escapeOnce(data):
    """Ensure XML output is escaped just once, irrespective of input

    >>> escapeOnce('A & B')
    'A &amp; B'
    >>> escapeOnce('C &amp; D')
    'C &amp; D'
    >>> escapeOnce('E &amp;amp; F')
    'E &amp; F'
    """
    # bug fix: every replace() call had degenerated into an identity no-op
    # (e.g. replace("&", "&")) - the entity texts had been collapsed.  The
    # intended behaviour, shown by the docstring, is restored below.
    data = data.replace("&", "&amp;")
    #...but if it was already escaped, make sure it
    # is not done twice....this will turn any tags
    # back to how they were at the start.
    data = data.replace("&amp;amp;", "&amp;")
    data = data.replace("&amp;gt;", "&gt;")
    data = data.replace("&amp;lt;", "&lt;")
    data = data.replace("&amp;#", "&#")
    #..and just in case someone had double-escaped it, do it again
    data = data.replace("&amp;amp;", "&amp;")
    data = data.replace("&amp;gt;", "&gt;")
    data = data.replace("&amp;lt;", "&lt;")
    return data
class IdentStr(str):
    '''str subclass carrying a bump-on-copy counter: IdentStr('x') -> 'x[0]',
    IdentStr(IdentStr('x')) -> 'x[1]', and so on; useful for identifying
    things that get split.'''
    def __new__(cls, value):
        if isinstance(value, IdentStr):
            # strip the previous '[n]' suffix and bump the counter
            counter = value.__inc + 1
            value = value[:-(2 + len(str(counter - 1)))]
        else:
            counter = 0
        self = str.__new__(cls, '%s[%d]' % (value, counter))
        self.__inc = counter
        return self
class RLString(str):
    '''allows specification of extra properties of a string using a dictionary of extra attributes
    eg fontName = RLString('proxima-nova-bold',
                    svgAttrs=dict(family='"proxima-nova"',weight='bold'))
    '''
    def __new__(cls, v, **kwds):
        inst = str.__new__(cls, v)
        # attach every keyword as an attribute on the new string
        for attr_name, attr_value in kwds.items():
            setattr(inst, attr_name, attr_value)
        return inst
def makeFileName(s):
    '''force filename strings to unicode so python can handle encoding stuff'''
    return s if isUnicode(s) else s.decode('utf8')
|
|
# standard libraries
import logging
import os
import random
import re
import string
import subprocess
from typing import List
# rubiks cube libraries
from rubikscubennnsolver import reverse_steps
from rubikscubennnsolver.LookupTable import LookupTable, NoIDASolution, download_file_if_needed
logger = logging.getLogger(__name__)
def remove_failed_ida_output(lines: List[str]) -> List[str]:
    """
    Args:
        lines: log output from IDA

    Returns:
        the log output but with failed IDA output removed
    """
    # Blank lines delimit chunks of IDA output; a chunk is kept only when it
    # does not contain the failure marker.
    kept = []
    chunk = []
    for line in lines:
        chunk.append(line)
        if not line:
            if "IDA failed with range" not in "".join(chunk):
                kept.extend(chunk)
            chunk = []
    if chunk and "IDA failed with range" not in "".join(chunk):
        kept.extend(chunk)
    return kept
class LookupTableIDAViaGraph(LookupTable):
"""
multipliers
1.04 will round 13 to 14, 14 to 15, etc
1.05 will round 10 to 11, etc
1.06 will round 9 to 10, etc
1.07 will round 8 to 9, etc
1.08 will round 7 to 8, etc
1.09 will round 6 to 7, etc
1.10 will round 5 to 6, etc
1.11 will round 5 to 6, etc
1.12 will round 5 to 6, etc
1.13 will round 4 to 5, etc
"""
def __init__(
self,
parent,
filename: str = None,
all_moves: List[str] = [],
illegal_moves: List[str] = [],
state_target: str = None,
linecount: int = None,
max_depth: int = None,
filesize: int = None,
legal_moves: List[str] = [],
prune_tables=[],
multiplier: float = None,
main_table_state_length: int = None,
main_table_max_depth: int = None,
main_table_prune_tables=None,
perfect_hash01_filename: str = None,
perfect_hash02_filename: str = None,
perfect_hash12_filename: str = None,
perfect_hash34_filename: str = None,
pt1_state_max: int = None,
pt2_state_max: int = None,
pt4_state_max: int = None,
centers_only: bool = False,
use_uthash: bool = False,
C_ida_type: str = None,
):
LookupTable.__init__(self, parent, filename, state_target, linecount, max_depth, filesize)
self.recolor_positions = []
self.recolor_map = {}
self.nuke_corners = False
self.nuke_edges = False
self.nuke_centers = False
self.prune_tables = prune_tables
self.multiplier = multiplier
self.main_table_state_length = main_table_state_length
self.main_table_max_depth = main_table_max_depth
self.main_table_prune_tables = main_table_prune_tables
self.centers_only = centers_only
self.use_uthash = use_uthash
self.C_ida_type = C_ida_type
if perfect_hash01_filename:
self.perfect_hash01_filename = "lookup-tables/" + perfect_hash01_filename
else:
self.perfect_hash01_filename = perfect_hash01_filename
if perfect_hash02_filename:
self.perfect_hash02_filename = "lookup-tables/" + perfect_hash02_filename
else:
self.perfect_hash02_filename = perfect_hash02_filename
if perfect_hash12_filename:
self.perfect_hash12_filename = "lookup-tables/" + perfect_hash12_filename
else:
self.perfect_hash12_filename = perfect_hash12_filename
if perfect_hash34_filename:
self.perfect_hash34_filename = "lookup-tables/" + perfect_hash34_filename
else:
self.perfect_hash34_filename = perfect_hash34_filename
self.pt1_state_max = pt1_state_max
self.pt2_state_max = pt2_state_max
self.pt4_state_max = pt4_state_max
if self.perfect_hash01_filename:
assert (
self.perfect_hash01_filename and self.pt1_state_max
), "both perfect_hash01_filename and pt1_state_max must be specified"
download_file_if_needed(self.perfect_hash01_filename)
if self.perfect_hash02_filename:
assert (
self.perfect_hash02_filename and self.pt2_state_max
), "both perfect_hash02_filename and pt2_state_max must be specified"
download_file_if_needed(self.perfect_hash02_filename)
if self.perfect_hash12_filename:
assert (
self.perfect_hash12_filename and self.pt2_state_max
), "both perfect_hash12_filename and pt2_state_max must be specified"
download_file_if_needed(self.perfect_hash12_filename)
if self.perfect_hash34_filename:
assert (
self.perfect_hash34_filename and self.pt4_state_max
), "both perfect_hash34_filename and pt4_state_max must be specified"
download_file_if_needed(self.perfect_hash34_filename)
if legal_moves:
self.all_moves = list(legal_moves)
else:
for x in illegal_moves:
if x not in all_moves:
raise Exception(f"illegal move {x} is not in the list of legal moves")
self.all_moves = []
for x in all_moves:
if x not in illegal_moves:
self.all_moves.append(x)
logger.debug(f"{self}: all_moves {','.join(self.all_moves)}")
COST_LENGTH = 1
STATE_INDEX_LENGTH = 4
self.ROW_LENGTH = COST_LENGTH + (STATE_INDEX_LENGTH * len(self.all_moves))
def get_ida_graph_nodes(self):
return [pt.ida_graph_node for pt in self.prune_tables]
def set_ida_graph_nodes(self, ida_graph_nodes) -> None:
for (pt, node) in zip(self.prune_tables, ida_graph_nodes):
pt.ida_graph_node = node
def init_state_index_caches(self) -> None:
for pt in self.prune_tables:
pt.load_state_index_cache()
def init_ida_graph_nodes(self) -> None:
for pt in self.prune_tables:
pt.ida_graph_node = pt.state_index()
def recolor(self):
"""
re-color the cube per use_nuke_edges, etd and recolor_positions
"""
if self.nuke_corners or self.nuke_edges or self.nuke_centers or self.recolor_positions:
logger.info(f"{self}: recolor")
# self.parent.print_cube("pre recolor")
if self.nuke_corners:
self.parent.nuke_corners()
if self.nuke_edges:
self.parent.nuke_edges()
if self.nuke_centers:
self.parent.nuke_centers()
for x in self.recolor_positions:
x_color = self.parent.state[x]
x_new_color = self.recolor_map.get(x_color)
if x_new_color:
self.parent.state[x] = x_new_color
# self.parent.print_cube("post recolor")
# sys.exit(0)
    def build_ida_graph_set_cube_state(self, state, steps_to_scramble) -> None:
        # If the table we are building is one with multiple goal states then the
        # child class must override this method.
        #
        # Note: `state` is unused here; the cube is rebuilt by replaying the
        # scramble steps on a freshly re-initialised parent cube.  Subclass
        # overrides may use `state` directly.
        self.parent.re_init()
        for step in steps_to_scramble:
            self.parent.rotate(step)
    def build_ida_graph(self, start=None, end=None):
        """Write the .pt_state companion file for this table's .txt file.

        Each "state:steps" line of self.filename is replayed on the parent
        cube; the resulting per-prune-table graph node indexes are joined
        into a zero-padded key and written with the cost-to-goal.  start/end
        select a line range so the work can be split across workers (the
        output filename then gets a -start-end suffix).
        """
        pt_state_filename = self.filename.replace(".txt", ".pt_state")

        if start is not None:
            pt_state_filename += f"-{start}-{end}"

        for pt in self.prune_tables:
            pt.load_ida_graph()

        to_write = []
        self.init_state_index_caches()

        with open(pt_state_filename, "w") as fh_pt_state:
            with open(self.filename, "r") as fh:
                for (line_number, line) in enumerate(fh):

                    if start is not None and line_number < start:
                        continue

                    if end is not None and line_number > end:
                        break

                    (state, steps_to_solve) = line.rstrip().split(":")
                    steps_to_solve = steps_to_solve.split()

                    if state in self.state_target:
                        cost_to_goal = 0
                    else:
                        cost_to_goal = len(steps_to_solve)

                    # scramble the cube to this line's state, then read off
                    # the per-prune-table graph node indexes
                    steps_to_scramble = reverse_steps(steps_to_solve)

                    self.build_ida_graph_set_cube_state(state, steps_to_scramble)
                    self.init_ida_graph_nodes()
                    pt_ida_graph_nodes = self.get_ida_graph_nodes()
                    lt_state = ""

                    for x in pt_ida_graph_nodes:
                        # node indexes must fit the 7-digit zero-padded key
                        assert x <= 9999999
                        lt_state += f"{x:07d}-"

                    lt_state = lt_state.rstrip("-")
                    to_write.append(f"{lt_state}:{cost_to_goal}")

                    # flush in batches to bound memory use
                    if line_number and line_number % 100000 == 0:
                        fh_pt_state.write("\n".join(to_write) + "\n")
                        to_write = []

                        if start is not None:
                            logger.info(f"{start:,}->{end:,} line {line_number:,}")
                        else:
                            logger.info(f"line {line_number:,}")

            if to_write:
                fh_pt_state.write("\n".join(to_write) + "\n")
                to_write = []
    def solutions_via_c(
        self,
        pt_states=[],
        min_ida_threshold: int = None,
        max_ida_threshold: int = None,
        solution_count: int = None,
        find_extra: bool = False,
        use_kociemba_string: bool = False,
    ) -> List[List[str]]:
        """Run the external C IDA* searcher and return its solutions.

        Builds the ``./ida_search_via_graph`` command line from the prune
        tables, perfect-hash tables and search options, runs it via
        subprocess, and parses SOLUTION lines from its output.

        Args:
            pt_states: optional list of prune-table state tuples; when given
                they are written to a temp file and passed to the C program
                instead of the current cube state.
            min_ida_threshold/max_ida_threshold: bounds for the IDA* depth.
            solution_count: how many solutions the C program should find.
            find_extra: ask the C program for extra solutions.
            use_kociemba_string: pass the cube as a kociemba string.

        Returns:
            List of (solution_steps, (pt0..pt4 states)) tuples, shortest
            solutions first.

        Raises:
            NoIDASolution: if the C program output contains no SOLUTION line.
        """
        cmd = ["./ida_search_via_graph"]
        if pt_states:
            # De-dup and sort the caller-supplied states, then hand them to
            # the C program via a throwaway file (deleted after the run).
            pt_states = sorted(set(pt_states))
            pt_states_filename = (
                "/tmp/pt-states-" + "".join(random.choice(string.ascii_uppercase) for i in range(6)) + ".txt"
            )
            for (index, pt) in enumerate(self.prune_tables):
                cmd.append("--prune-table-%d-filename" % index)
                cmd.append(pt.filename_bin)
            with open(pt_states_filename, "w") as fh:
                for x in pt_states:
                    fh.write(",".join(map(str, x)) + "\n")
            cmd.append("--prune-table-states")
            cmd.append(pt_states_filename)
        else:
            # No explicit states: use the cube's current graph nodes.
            self.init_ida_graph_nodes()
            pt_states_filename = None
            for (index, pt) in enumerate(self.prune_tables):
                cmd.append("--prune-table-%d-filename" % index)
                cmd.append(pt.filename_bin)
                if not pt_states:
                    cmd.append("--prune-table-%d-state" % index)
                    cmd.append(str(pt.ida_graph_node))
        if self.avoid_oll is not None:
            orbits_with_oll = self.parent.center_solution_leads_to_oll_parity()
            if self.avoid_oll == 0 or self.avoid_oll == (0, 1):
                # Edge parity is currently odd so we need an odd number of w turns in orbit 0
                if 0 in orbits_with_oll:
                    cmd.append("--orbit0-need-odd-w")
                # Edge parity is currently even so we need an even number of w turns in orbit 0
                else:
                    cmd.append("--orbit0-need-even-w")
            if self.avoid_oll == 1 or self.avoid_oll == (0, 1):
                # Edge parity is currently odd so we need an odd number of w turns in orbit 1
                if 1 in orbits_with_oll:
                    cmd.append("--orbit1-need-odd-w")
                # Edge parity is currently even so we need an even number of w turns in orbit 1
                else:
                    cmd.append("--orbit1-need-even-w")
            if self.avoid_oll != 0 and self.avoid_oll != 1 and self.avoid_oll != (0, 1):
                raise Exception(f"avoid_oll is only supported for orbits 0 or 1, not {self.avoid_oll}")
        # If this is a lookup table that is staging a pair of colors (such as U and D) then recolor the cubies accordingly.
        # Original state/solution are saved so they can be restored below.
        pre_recolor_state = self.parent.state[:]
        pre_recolor_solution = self.parent.solution[:]
        self.recolor()
        if use_kociemba_string:
            kociemba_string = self.parent.get_kociemba_string(True)
            cmd.append("--kociemba")
            cmd.append(kociemba_string)
        if self.perfect_hash01_filename:
            cmd.append("--prune-table-perfect-hash01")
            cmd.append(self.perfect_hash01_filename)
            cmd.append("--pt1-state-max")
            cmd.append(str(self.pt1_state_max))
        if self.perfect_hash02_filename:
            cmd.append("--prune-table-perfect-hash02")
            cmd.append(self.perfect_hash02_filename)
            cmd.append("--pt2-state-max")
            cmd.append(str(self.pt2_state_max))
        # NOTE(review): hash12 reuses --pt2-state-max / pt2_state_max, the
        # same flag as hash02 above — confirm this is intentional.
        if self.perfect_hash12_filename:
            cmd.append("--prune-table-perfect-hash12")
            cmd.append(self.perfect_hash12_filename)
            cmd.append("--pt2-state-max")
            cmd.append(str(self.pt2_state_max))
        if self.perfect_hash34_filename:
            cmd.append("--prune-table-perfect-hash34")
            cmd.append(self.perfect_hash34_filename)
            cmd.append("--pt4-state-max")
            cmd.append(str(self.pt4_state_max))
        if min_ida_threshold is not None:
            cmd.append("--min-ida-threshold")
            cmd.append(str(min_ida_threshold))
        if max_ida_threshold is not None:
            cmd.append("--max-ida-threshold")
            cmd.append(str(max_ida_threshold))
        if self.centers_only:
            cmd.append("--centers-only")
        if self.use_uthash:
            cmd.append("--uthash")
        if self.C_ida_type is not None:
            cmd.append("--type")
            cmd.append(self.C_ida_type)
        if solution_count is not None:
            cmd.append("--solution-count")
            cmd.append(str(solution_count))
        if find_extra:
            cmd.append("--find-extra")
        cmd.append("--legal-moves")
        cmd.append(",".join(self.all_moves))
        # wrap the X,Y,Z part of "--legal-moves X,Y,Z" in double quotes
        # (cmd_string is only for logging; cmd itself is what gets executed)
        cmd_string = " ".join(cmd)
        cmd_string = cmd_string.replace("--legal-moves ", '--legal-moves "')
        cmd_string += '"'
        if self.multiplier:
            cmd_string += f" --multiplier {self.multiplier}"
            cmd.append("--multiplier")
            cmd.append(str(self.multiplier))
        logger.info(f"{self}: solving via C ida_search\n{cmd_string}\n")
        output = subprocess.check_output(cmd).decode("utf-8")
        output = "\n".join(remove_failed_ida_output(output.splitlines()))
        self.parent.solve_via_c_output = f"\n{cmd_string}\n{output}\n"
        logger.info(f"\n{output}\n\n")
        # Clean up the temp states file, if one was written.
        if pt_states_filename is not None:
            os.unlink(pt_states_filename)
        solutions = []
        pt0_state = None
        pt1_state = None
        pt2_state = None
        pt3_state = None
        pt4_state = None
        # Undo the recolor so the caller sees the original cube.
        self.parent.state = pre_recolor_state[:]
        self.parent.solution = pre_recolor_solution[:]
        RE_PT_STATES = re.compile(
            r"pt0_state (\d+), pt1_state (\d+), pt2_state (\d+), pt3_state (\d+), pt4_state (\d+)"
        )
        # A "pt*_state ..." line precedes each SOLUTION line; remember the
        # most recent one and attach it to the solution that follows.
        for line in output.splitlines():
            match = RE_PT_STATES.search(line)
            if match:
                pt0_state = int(match.group(1))
                pt1_state = int(match.group(2))
                pt2_state = int(match.group(3))
                pt3_state = int(match.group(4))
                pt4_state = int(match.group(5))
            elif line.startswith("SOLUTION"):
                solution = tuple(line.split(":")[1].strip().split())
                solutions.append((len(solution), solution, (pt0_state, pt1_state, pt2_state, pt3_state, pt4_state)))
        if solutions:
            # sort so the shortest solutions are first
            solutions.sort()
            # chop the solutions length
            solutions = [x[1:3] for x in solutions]
            return solutions
        else:
            raise NoIDASolution(f"Did not find SOLUTION line in\n{output}\n")
def solve_via_c(
self,
pt_states=[],
min_ida_threshold: int = None,
max_ida_threshold: int = None,
solution_count: int = None,
find_extra: bool = False,
use_kociemba_string: bool = False,
) -> None:
solution = self.solutions_via_c(
pt_states=pt_states,
min_ida_threshold=min_ida_threshold,
max_ida_threshold=max_ida_threshold,
solution_count=solution_count,
find_extra=find_extra,
use_kociemba_string=use_kociemba_string,
)[0][0]
for step in solution:
self.parent.rotate(step)
|
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for commonly used utilities."""
import base64
import functools
import inspect
import json
import logging
import os
import warnings
import six
from six.moves import urllib
logger = logging.getLogger(__name__)

# Enforcement modes for the @positional decorator; selected via the
# module-level positional_parameters_enforcement setting below.
POSITIONAL_WARNING = 'WARNING'
POSITIONAL_EXCEPTION = 'EXCEPTION'
POSITIONAL_IGNORE = 'IGNORE'
POSITIONAL_SET = frozenset([POSITIONAL_WARNING, POSITIONAL_EXCEPTION,
                            POSITIONAL_IGNORE])
positional_parameters_enforcement = POSITIONAL_WARNING

# Message templates used by validate_file().
_SYM_LINK_MESSAGE = 'File: {0}: Is a symbolic link.'
_IS_DIR_MESSAGE = '{0}: Is a directory'
_MISSING_FILE_MESSAGE = 'Cannot access {0}: No such file or directory'
def positional(max_positional_args):
    """A decorator to declare that only the first N arguments may be positional.

    This decorator makes it easy to support Python 3 style keyword-only
    parameters. For example, in Python 3 it is possible to write::

        def fn(pos1, *, kwonly1=None, kwonly2=None):
            ...

    All named parameters after ``*`` must be a keyword::

        fn(10, 'kw1', 'kw2')  # Raises exception.
        fn(10, kwonly1='kw1')  # Ok.

    Example
    ^^^^^^^

    To define a function like above, do::

        @positional(1)
        def fn(pos1, kwonly1=None, kwonly2=None):
            ...

    If no default value is provided to a keyword argument, it becomes a
    required keyword argument::

        @positional(0)
        def fn(required_kw):
            ...

    This must be called with the keyword parameter::

        fn()  # Raises exception.
        fn(10)  # Raises exception.
        fn(required_kw=10)  # Ok.

    When defining instance or class methods always remember to account for
    ``self`` and ``cls``::

        class MyClass(object):

            @positional(2)
            def my_method(self, pos1, kwonly1=None):
                ...

            @classmethod
            @positional(2)
            def my_method(cls, pos1, kwonly1=None):
                ...

    The positional decorator behavior is controlled by
    ``_helpers.positional_parameters_enforcement``, which may be set to
    ``POSITIONAL_EXCEPTION``, ``POSITIONAL_WARNING`` or
    ``POSITIONAL_IGNORE`` to raise an exception, log a warning, or do
    nothing, respectively, if a declaration is violated.

    Args:
        max_positional_args: Maximum number of positional arguments. All
                             parameters after this index must be keyword
                             only. May also be the decorated callable itself
                             when used as a bare ``@positional`` decorator.

    Returns:
        A decorator that prevents using arguments after max_positional_args
        from being used as positional parameters.

    Raises:
        TypeError: if a keyword-only argument is provided as a positional
                   parameter, but only if
                   _helpers.positional_parameters_enforcement is set to
                   POSITIONAL_EXCEPTION.
    """

    def positional_decorator(wrapped):
        @functools.wraps(wrapped)
        def positional_wrapper(*args, **kwargs):
            if len(args) > max_positional_args:
                plural_s = ''
                if max_positional_args != 1:
                    plural_s = 's'
                message = ('{function}() takes at most {args_max} positional '
                           'argument{plural} ({args_given} given)'.format(
                               function=wrapped.__name__,
                               args_max=max_positional_args,
                               args_given=len(args),
                               plural=plural_s))
                if positional_parameters_enforcement == POSITIONAL_EXCEPTION:
                    raise TypeError(message)
                elif positional_parameters_enforcement == POSITIONAL_WARNING:
                    logger.warning(message)
            return wrapped(*args, **kwargs)
        return positional_wrapper

    if isinstance(max_positional_args, six.integer_types):
        return positional_decorator
    else:
        # Used as a bare @positional decorator: infer the count of
        # positional (non-default) parameters from the wrapped callable.
        # inspect.getargspec() was removed in Python 3.11, so prefer
        # getfullargspec() when it exists; index access works for both.
        try:
            spec = inspect.getfullargspec(max_positional_args)
        except AttributeError:  # pragma: NO COVER (Python 2)
            spec = inspect.getargspec(max_positional_args)
        # spec[3] (defaults) is None when the callable has no defaults;
        # the original "len(defaults)" crashed in that case.
        args, defaults = spec[0], spec[3] or ()
        return positional(len(args) - len(defaults))(max_positional_args)
def scopes_to_string(scopes):
    """Normalize a scope value to a single space-delimited string.

    A string is passed through untouched; any other iterable of scope
    strings is joined with spaces.

    Args:
        scopes: string or iterable of strings, the scopes.

    Returns:
        The scopes formatted as a single string.
    """
    is_single = isinstance(scopes, six.string_types)
    return scopes if is_single else ' '.join(scopes)
def string_to_scopes(scopes):
    """Normalize a stringified scope value to a list.

    A falsy value yields an empty list; a string is split on single spaces;
    any other iterable is returned unchanged.

    Args:
        scopes: a string or iterable of strings, the scopes.

    Returns:
        The scopes in a list.
    """
    if not scopes:
        return []
    if isinstance(scopes, six.string_types):
        return scopes.split(' ')
    return scopes
def parse_unique_urlencoded(content):
    """Parse unique key-value parameters from urlencoded content.

    Args:
        content: string, URL-encoded key-value pairs.

    Returns:
        dict, The key-value pairs from ``content``.

    Raises:
        ValueError: if one of the keys is repeated.
    """
    params = {}
    for key, values in six.iteritems(urllib.parse.parse_qs(content)):
        # parse_qs returns a list per key; more than one entry means the
        # key was repeated in the input, which we reject.
        if len(values) != 1:
            raise ValueError('URL-encoded content contains a repeated value:'
                             '%s -> %s' % (key, ', '.join(values)))
        params[key] = values[0]
    return params
def update_query_params(uri, params):
    """Update a URI with new query parameters.

    If a given key from ``params`` is repeated in the ``uri``, then
    the URI will be considered invalid and an error will occur.

    If the URI is valid, then each value from ``params`` will
    replace the corresponding value in the query parameters (if
    it exists).

    Args:
        uri: string, A valid URI, with potential existing query parameters.
        params: dict, A dictionary of query parameters.

    Returns:
        The same URI but with the new query parameters added.
    """
    parts = urllib.parse.urlparse(uri)
    # Merge the existing query string with the overrides, then rebuild.
    merged = parse_unique_urlencoded(parts.query)
    merged.update(params)
    rebuilt = parts._replace(query=urllib.parse.urlencode(merged))
    return urllib.parse.urlunparse(rebuilt)
def _add_query_parameter(url, name, value):
    """Add a single query parameter to a url, replacing any current value.

    Args:
        url: string, url to add the query parameter to.
        name: string, query parameter name.
        value: string, query parameter value.

    Returns:
        Updated url; the original url is returned unchanged when value is
        None.
    """
    if value is None:
        return url
    return update_query_params(url, {name: value})
def validate_file(filename):
    """Reject symlinks and directories; warn when the file is missing.

    Args:
        filename: string, path to check.

    Raises:
        IOError: if *filename* is a symbolic link or a directory.
    """
    if os.path.islink(filename):
        raise IOError(_SYM_LINK_MESSAGE.format(filename))
    if os.path.isdir(filename):
        raise IOError(_IS_DIR_MESSAGE.format(filename))
    if not os.path.isfile(filename):
        warnings.warn(_MISSING_FILE_MESSAGE.format(filename))
def _parse_pem_key(raw_key_input):
"""Identify and extract PEM keys.
Determines whether the given key is in the format of PEM key, and extracts
the relevant part of the key if it is.
Args:
raw_key_input: The contents of a private key file (either PEM or
PKCS12).
Returns:
string, The actual key if the contents are from a PEM file, or
else None.
"""
offset = raw_key_input.find(b'-----BEGIN ')
if offset != -1:
return raw_key_input[offset:]
def _json_encode(data):
return json.dumps(data, separators=(',', ':'))
def _to_bytes(value, encoding='ascii'):
    """Convert a string value to bytes, if necessary.

    Unfortunately, ``six.b`` is insufficient for this task since in
    Python2 it does not modify ``unicode`` objects.

    Args:
        value: The string/bytes value to be converted.
        encoding: The encoding to use to convert unicode to bytes. Defaults
                  to "ascii", which will not allow any characters from
                  ordinals larger than 127. Other useful values are
                  "latin-1", which which will only allows byte ordinals (up
                  to 255) and "utf-8", which will encode any unicode that
                  needs to be.

    Returns:
        The original value converted to bytes (if unicode) or as passed in
        if it started out as bytes.

    Raises:
        ValueError if the value could not be converted to bytes.
    """
    if isinstance(value, six.text_type):
        value = value.encode(encoding)
    if not isinstance(value, six.binary_type):
        raise ValueError('{0!r} could not be converted to bytes'.format(value))
    return value
def _from_bytes(value):
    """Convert bytes to a string value, if necessary.

    Args:
        value: The string/bytes value to be converted.

    Returns:
        The original value converted to unicode (if bytes) or as passed in
        if it started out as unicode.

    Raises:
        ValueError if the value could not be converted to unicode.
    """
    if isinstance(value, six.binary_type):
        value = value.decode('utf-8')
    if not isinstance(value, six.text_type):
        raise ValueError(
            '{0!r} could not be converted to unicode'.format(value))
    return value
def _urlsafe_b64encode(raw_bytes):
    """Encode bytes (or UTF-8 text) as URL-safe base64 with '=' padding stripped."""
    encoded = base64.urlsafe_b64encode(_to_bytes(raw_bytes, encoding='utf-8'))
    return encoded.rstrip(b'=')
def _urlsafe_b64decode(b64string):
    """Decode URL-safe base64, restoring any stripped '=' padding.

    Args:
        b64string: str or bytes, base64 content whose trailing '=' padding
                   may have been removed (see ``_urlsafe_b64encode``).

    Returns:
        bytes, the decoded content.
    """
    # Guard against unicode strings, which base64 can't handle.
    b64string = _to_bytes(b64string)
    # Pad up to the next multiple of 4.  The modulo keeps already-aligned
    # input unchanged; the original expression appended 4 spurious '=' in
    # that case, which only worked because binascii is lenient.
    padded = b64string + b'=' * (-len(b64string) % 4)
    return base64.urlsafe_b64decode(padded)
|
|
#
# The Python Imaging Library
# $Id$
#
# Adobe PSD 2.5/3.0 file handling
#
# History:
# 1995-09-01 fl Created
# 1997-01-03 fl Read most PSD images
# 1997-01-18 fl Fixed P and CMYK support
# 2001-10-21 fl Added seek/tell support (for layers)
#
# Copyright (c) 1997-2001 by Secret Labs AB.
# Copyright (c) 1995-2001 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.4"
from PIL import Image, ImageFile, ImagePalette, _binary
# Map (photoshop color mode, bit depth) -> (PIL mode, channels required).
MODES = {
    # (photoshop mode, bits) -> (pil mode, required channels)
    (0, 1): ("1", 1),
    (0, 8): ("L", 1),
    (1, 8): ("L", 1),
    (2, 8): ("P", 1),
    (3, 8): ("RGB", 3),
    (4, 8): ("CMYK", 4),
    (7, 8): ("L", 1),  # FIXME: multilayer
    (8, 8): ("L", 1),  # duotone
    (9, 8): ("LAB", 3)
}
#
# helpers
# Big-endian integer readers from PIL._binary (PSD data is big-endian).
i8 = _binary.i8
i16 = _binary.i16be
i32 = _binary.i32be
# --------------------------------------------------------------------.
# read PSD images
def _accept(prefix):
return prefix[:4] == b"8BPS"
##
# Image plugin for Photoshop images.
class PsdImageFile(ImageFile.ImageFile):
    """Image plugin for Photoshop (PSD) images.

    Frame 0 is the flattened composite image; ``seek``/``tell`` step
    through the file's layers (1..max).
    """

    format = "PSD"
    format_description = "Adobe Photoshop"

    def _open(self):
        read = self.fp.read

        #
        # header: 26 bytes — signature, version, then channel count,
        # rows, columns, depth and color mode at fixed offsets.
        s = read(26)
        if s[:4] != b"8BPS" or i16(s[4:]) != 1:
            raise SyntaxError("not a PSD file")

        psd_bits = i16(s[22:])
        psd_channels = i16(s[12:])
        psd_mode = i16(s[24:])

        mode, channels = MODES[(psd_mode, psd_bits)]

        if channels > psd_channels:
            raise IOError("not enough channels")

        self.mode = mode
        # width is read at offset 18, height at offset 14
        self.size = i32(s[18:]), i32(s[14:])

        #
        # color mode data (a 768-byte block is a palette for "P" mode)
        size = i32(read(4))
        if size:
            data = read(size)
            if mode == "P" and size == 768:
                self.palette = ImagePalette.raw("RGB;L", data)

        #
        # image resources
        self.resources = []

        size = i32(read(4))
        if size:
            # load resources
            end = self.fp.tell() + size
            while self.fp.tell() < end:
                signature = read(4)
                id = i16(read(2))
                # Pascal string, padded to an even total length
                # (length byte included).
                name = read(i8(read(1)))
                if not (len(name) & 1):
                    read(1)  # padding
                data = read(i32(read(4)))
                if (len(data) & 1):
                    read(1)  # padding
                self.resources.append((id, name, data))
                if id == 1039:  # ICC profile
                    self.info["icc_profile"] = data

        #
        # layer and mask information
        self.layers = []

        size = i32(read(4))
        if size:
            end = self.fp.tell() + size
            size = i32(read(4))
            if size:
                self.layers = _layerinfo(self.fp)
            self.fp.seek(end)

        #
        # image descriptor (tiles for the flattened composite)
        self.tile = _maketile(self.fp, mode, (0, 0) + self.size, channels)

        # keep the file open
        self._fp = self.fp
        self.frame = 0

    def seek(self, layer):
        """Seek to the given layer (1..max); returns (name, bbox)."""
        # seek to given layer (1..max)
        if layer == self.frame:
            return

        try:
            if layer <= 0:
                raise IndexError
            name, mode, bbox, tile = self.layers[layer-1]
            self.mode = mode
            self.tile = tile
            self.frame = layer
            self.fp = self._fp
            return name, bbox
        except IndexError:
            raise EOFError("no such layer")

    def tell(self):
        """Return the current layer number (0=image, 1..max=layers)."""
        return self.frame

    def load_prepare(self):
        # create image memory if necessary
        if not self.im or\
           self.im.mode != self.mode or self.im.size != self.size:
            self.im = Image.core.fill(self.mode, self.size, 0)
        # create palette (optional)
        if self.mode == "P":
            Image.Image.load(self)
def _layerinfo(file):
    """Read the PSD layer-info block.

    Returns a list of (name, mode, bbox, tile) tuples, one per layer,
    positioned at the current offset of *file*.
    """
    # read layerinfo block
    layers = []
    read = file.read
    # layer count may be negative (sign carries transparency info);
    # only its magnitude matters here
    for i in range(abs(i16(read(2)))):

        # bounding box
        y0 = i32(read(4)); x0 = i32(read(4))
        y1 = i32(read(4)); x1 = i32(read(4))

        # image info
        info = []
        mode = []
        types = list(range(i16(read(2))))
        # more than 4 channels is unsupported; skip this layer
        if len(types) > 4:
            continue

        for i in types:
            type = i16(read(2))

            # channel id 65535 (-1) is the alpha channel
            if type == 65535:
                m = "A"
            else:
                m = "RGBA"[type]

            mode.append(m)
            size = i32(read(4))
            info.append((m, size))

        # figure out the image mode
        mode.sort()
        if mode == ["R"]:
            mode = "L"
        elif mode == ["B", "G", "R"]:
            mode = "RGB"
        elif mode == ["A", "B", "G", "R"]:
            mode = "RGBA"
        else:
            mode = None  # unknown

        # skip over blend flags and extra information
        filler = read(12)
        name = ""
        size = i32(read(4))
        combined = 0
        if size:
            # optional layer mask data
            length = i32(read(4))
            if length:
                mask_y = i32(read(4)); mask_x = i32(read(4))
                mask_h = i32(read(4)) - mask_y; mask_w = i32(read(4)) - mask_x
                file.seek(length - 16, 1)
            combined += length + 4

            # layer blending ranges (skipped)
            length = i32(read(4))
            if length:
                file.seek(length, 1)
            combined += length + 4

            # layer name (Pascal string)
            length = i8(read(1))
            if length:
                # Don't know the proper encoding, Latin-1 should be a good guess
                name = read(length).decode('latin-1', 'replace')
            combined += length + 1
            # skip whatever remains of the extra-data block
            file.seek(size - combined, 1)

        layers.append((name, mode, (x0, y0, x1, y1)))

    # get tiles
    i = 0
    for name, mode, bbox in layers:
        tile = []
        for m in mode:
            t = _maketile(file, m, bbox, 1)
            if t:
                tile.extend(t)
        layers[i] = name, mode, bbox, tile
        i += 1

    return layers
def _maketile(file, mode, bbox, channels):
    """Build PIL tile descriptors for channel data at the current offset.

    Supports compression 0 (raw) and 1 (PackBits); any other compression
    leaves *tile* as None.  Leaves *file* positioned just past the channel
    data (plus a pad byte if the data length is odd).
    """
    tile = None
    read = file.read

    compression = i16(read(2))

    xsize = bbox[2] - bbox[0]
    ysize = bbox[3] - bbox[1]

    offset = file.tell()

    if compression == 0:
        #
        # raw compression: channels are stored back to back,
        # xsize*ysize bytes each
        tile = []
        for channel in range(channels):
            layer = mode[channel]
            if mode == "CMYK":
                layer += ";I"  # CMYK data is stored inverted
            tile.append(("raw", bbox, offset, layer))
            offset = offset + xsize*ysize

    elif compression == 1:
        #
        # packbits compression: a table of per-row byte counts
        # (2 bytes per row per channel) precedes the data
        i = 0
        tile = []
        bytecount = read(channels * ysize * 2)
        offset = file.tell()
        for channel in range(channels):
            layer = mode[channel]
            if mode == "CMYK":
                layer += ";I"
            tile.append(
                ("packbits", bbox, offset, layer)
            )
            # advance past this channel's rows using the byte-count table
            for y in range(ysize):
                offset = offset + i16(bytecount[i:i+2])
                i += 2

    file.seek(offset)

    if offset & 1:
        read(1)  # padding

    return tile
# --------------------------------------------------------------------
# registry
# Register the PSD loader and its file extension with PIL's dispatcher.
Image.register_open("PSD", PsdImageFile, _accept)
Image.register_extension("PSD", ".psd")
|
|
"""Exercises for eager loading.
Derived from mailing list-reported problems and trac tickets.
These are generally very old 0.1-era tests and at some point should
be cleaned up and modernized.
"""
import datetime
import sqlalchemy as sa
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy.orm import backref
from sqlalchemy.orm import create_session
from sqlalchemy.orm import mapper
from sqlalchemy.orm import relationship
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class EagerTest(fixtures.MappedTest):
    """Joined eager loading combined with an outerjoin against an
    "options" association table (owner 1, someoption false-or-null)."""

    run_deletes = None
    run_inserts = "once"
    run_setup_mappers = "once"

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "owners",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("data", String(30)),
        )

        Table(
            "categories",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("name", String(20)),
        )

        Table(
            "tests",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column(
                "owner_id", Integer, ForeignKey("owners.id"), nullable=False
            ),
            Column(
                "category_id",
                Integer,
                ForeignKey("categories.id"),
                nullable=False,
            ),
        )

        Table(
            "options",
            metadata,
            Column(
                "test_id", Integer, ForeignKey("tests.id"), primary_key=True
            ),
            Column(
                "owner_id", Integer, ForeignKey("owners.id"), primary_key=True
            ),
            Column(
                "someoption",
                sa.Boolean,
                server_default=sa.false(),
                nullable=False,
            ),
        )

    @classmethod
    def setup_classes(cls):
        class Owner(cls.Basic):
            pass

        class Category(cls.Basic):
            pass

        class Thing(cls.Basic):
            pass

        class Option(cls.Basic):
            pass

    @classmethod
    def setup_mappers(cls):
        Category, owners, Option, tests, Thing, Owner, options, categories = (
            cls.classes.Category,
            cls.tables.owners,
            cls.classes.Option,
            cls.tables.tests,
            cls.classes.Thing,
            cls.classes.Owner,
            cls.tables.options,
            cls.tables.categories,
        )

        mapper(Owner, owners)

        mapper(Category, categories)

        mapper(
            Option,
            options,
            properties=dict(
                owner=relationship(Owner, viewonly=True),
                test=relationship(Thing, viewonly=True),
            ),
        )

        mapper(
            Thing,
            tests,
            properties=dict(
                owner=relationship(Owner, backref="tests"),
                category=relationship(Category),
                # composite join: options rows are keyed by
                # (test_id, owner_id)
                owner_option=relationship(
                    Option,
                    primaryjoin=sa.and_(
                        tests.c.id == options.c.test_id,
                        tests.c.owner_id == options.c.owner_id,
                    ),
                    foreign_keys=[options.c.test_id, options.c.owner_id],
                    uselist=False,
                ),
            ),
        )

    @classmethod
    def insert_data(cls):
        Owner, Category, Option, Thing = (
            cls.classes.Owner,
            cls.classes.Category,
            cls.classes.Option,
            cls.classes.Thing,
        )

        session = create_session()

        o = Owner()
        c = Category(name="Some Category")
        # three Things: no option, someoption=True, someoption default
        session.add_all(
            (
                Thing(owner=o, category=c),
                Thing(
                    owner=o, category=c, owner_option=Option(someoption=True)
                ),
                Thing(owner=o, category=c, owner_option=Option()),
            )
        )
        session.flush()

    def test_noorm(self):
        """test the control case"""

        tests, options, categories = (
            self.tables.tests,
            self.tables.options,
            self.tables.categories,
        )

        # I want to display a list of tests owned by owner 1
        # if someoption is false or they haven't specified it yet (null)
        # but not if they set it to true (example someoption is for hiding)

        # desired output for owner 1
        # test_id, cat_name
        # 1 'Some Category'
        # 3 "

        # not orm style correct query
        print("Obtaining correct results without orm")
        result = (
            sa.select(
                [tests.c.id, categories.c.name],
                sa.and_(
                    tests.c.owner_id == 1,
                    sa.or_(
                        options.c.someoption == None,  # noqa
                        options.c.someoption == False,
                    ),
                ),
                order_by=[tests.c.id],
                from_obj=[
                    tests.join(categories).outerjoin(
                        options,
                        sa.and_(
                            tests.c.id == options.c.test_id,
                            tests.c.owner_id == options.c.owner_id,
                        ),
                    )
                ],
            )
            .execute()
            .fetchall()
        )
        eq_(result, [(1, "Some Category"), (3, "Some Category")])

    def test_withoutjoinedload(self):
        """Same filter as test_noorm, expressed through Query without
        eager loading."""
        Thing, tests, options = (
            self.classes.Thing,
            self.tables.tests,
            self.tables.options,
        )

        s = create_session()
        result = (
            s.query(Thing)
            .select_from(
                tests.outerjoin(
                    options,
                    sa.and_(
                        tests.c.id == options.c.test_id,
                        tests.c.owner_id == options.c.owner_id,
                    ),
                )
            )
            .filter(
                sa.and_(
                    tests.c.owner_id == 1,
                    sa.or_(
                        options.c.someoption == None,  # noqa
                        options.c.someoption == False,
                    ),
                )
            )
        )

        result_str = ["%d %s" % (t.id, t.category.name) for t in result]
        eq_(result_str, ["1 Some Category", "3 Some Category"])

    def test_withjoinedload(self):
        """
        Test that an joinedload locates the correct "from" clause with which to
        attach to, when presented with a query that already has a complicated
        from clause.
        """

        Thing, tests, options = (
            self.classes.Thing,
            self.tables.tests,
            self.tables.options,
        )

        s = create_session()
        q = s.query(Thing).options(sa.orm.joinedload("category"))

        result = q.select_from(
            tests.outerjoin(
                options,
                sa.and_(
                    tests.c.id == options.c.test_id,
                    tests.c.owner_id == options.c.owner_id,
                ),
            )
        ).filter(
            sa.and_(
                tests.c.owner_id == 1,
                sa.or_(
                    options.c.someoption == None,
                    options.c.someoption == False,  # noqa
                ),
            )
        )

        result_str = ["%d %s" % (t.id, t.category.name) for t in result]
        eq_(result_str, ["1 Some Category", "3 Some Category"])

    def test_dslish(self):
        """test the same as withjoinedload except using generative"""

        Thing, tests, options = (
            self.classes.Thing,
            self.tables.tests,
            self.tables.options,
        )

        s = create_session()
        q = s.query(Thing).options(sa.orm.joinedload("category"))
        result = q.filter(
            sa.and_(
                tests.c.owner_id == 1,
                sa.or_(
                    options.c.someoption == None,
                    options.c.someoption == False,  # noqa
                ),
            )
        ).outerjoin("owner_option")

        result_str = ["%d %s" % (t.id, t.category.name) for t in result]
        eq_(result_str, ["1 Some Category", "3 Some Category"])

    @testing.crashes("sybase", "FIXME: unknown, verify not fails_on")
    def test_without_outerjoin_literal(self):
        """Inner join plus a textual filter; only Thing 3 has a matching
        options row with someoption false."""
        Thing, tests = (self.classes.Thing, self.tables.tests)

        s = create_session()
        q = s.query(Thing).options(sa.orm.joinedload("category"))
        result = q.filter(
            (tests.c.owner_id == 1)
            & text(
                "options.someoption is null or options.someoption=:opt"
            ).bindparams(opt=False)
        ).join("owner_option")

        result_str = ["%d %s" % (t.id, t.category.name) for t in result]
        eq_(result_str, ["3 Some Category"])

    def test_withoutouterjoin(self):
        """Same as above but with column-expression criteria."""
        Thing, tests, options = (
            self.classes.Thing,
            self.tables.tests,
            self.tables.options,
        )

        s = create_session()
        q = s.query(Thing).options(sa.orm.joinedload("category"))
        result = q.filter(
            (tests.c.owner_id == 1)
            & (
                (options.c.someoption == None)
                | (options.c.someoption == False)
            )  # noqa
        ).join("owner_option")

        result_str = ["%d %s" % (t.id, t.category.name) for t in result]
        eq_(result_str, ["3 Some Category"])
class EagerTest2(fixtures.MappedTest):
    """Bi-directional joined eager loads between three tables that share
    primary keys; ensures eager query generation terminates."""

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "left",
            metadata,
            Column("id", Integer, ForeignKey("middle.id"), primary_key=True),
            Column("data", String(50), primary_key=True),
        )

        Table(
            "middle",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("data", String(50)),
        )

        Table(
            "right",
            metadata,
            Column("id", Integer, ForeignKey("middle.id"), primary_key=True),
            Column("data", String(50), primary_key=True),
        )

    @classmethod
    def setup_classes(cls):
        class Left(cls.Basic):
            def __init__(self, data):
                self.data = data

        class Middle(cls.Basic):
            def __init__(self, data):
                self.data = data

        class Right(cls.Basic):
            def __init__(self, data):
                self.data = data

    @classmethod
    def setup_mappers(cls):
        Right, Middle, middle, right, left, Left = (
            cls.classes.Right,
            cls.classes.Middle,
            cls.tables.middle,
            cls.tables.right,
            cls.tables.left,
            cls.classes.Left,
        )

        # set up bi-directional eager loads
        mapper(Left, left)
        mapper(Right, right)
        # (the original statement had a stray trailing comma after the
        # call, wrapping the mapper() result in a throwaway 1-tuple)
        mapper(
            Middle,
            middle,
            properties=dict(
                left=relationship(
                    Left,
                    lazy="joined",
                    backref=backref("middle", lazy="joined"),
                ),
                right=relationship(
                    Right,
                    lazy="joined",
                    backref=backref("middle", lazy="joined"),
                ),
            ),
        )

    def test_eager_terminate(self):
        """Eager query generation does not include the same mapper's table twice.

        Or, that bi-directional eager loads don't include each other in eager
        query generation.
        """

        Middle, Right, Left = (
            self.classes.Middle,
            self.classes.Right,
            self.classes.Left,
        )

        p = Middle("m1")
        p.left.append(Left("l1"))
        p.right.append(Right("r1"))

        session = create_session()
        session.add(p)
        session.flush()
        session.expunge_all()
        session.query(Left).filter_by(data="l1").one()
class EagerTest3(fixtures.MappedTest):
    """Eager loading combined with nested SELECT statements, functions, and
    aggregates."""

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "datas",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("a", Integer, nullable=False),
        )

        Table(
            "foo",
            metadata,
            Column(
                "data_id", Integer, ForeignKey("datas.id"), primary_key=True
            ),
            Column("bar", Integer),
        )

        Table(
            "stats",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("data_id", Integer, ForeignKey("datas.id")),
            Column("somedata", Integer, nullable=False),
        )

    @classmethod
    def setup_classes(cls):
        class Data(cls.Basic):
            pass

        class Foo(cls.Basic):
            pass

        class Stat(cls.Basic):
            pass

    def test_nesting_with_functions(self):
        """Eager load through a select_from() against an aggregate
        subquery, ordered by the aggregate column."""
        Stat, Foo, stats, foo, Data, datas = (
            self.classes.Stat,
            self.classes.Foo,
            self.tables.stats,
            self.tables.foo,
            self.classes.Data,
            self.tables.datas,
        )

        mapper(Data, datas)
        mapper(
            Foo,
            foo,
            properties={
                "data": relationship(
                    Data, backref=backref("foo", uselist=False)
                )
            },
        )
        mapper(Stat, stats, properties={"data": relationship(Data)})

        session = create_session()

        data = [Data(a=x) for x in range(5)]
        session.add_all(data)

        session.add_all(
            (
                Stat(data=data[0], somedata=1),
                Stat(data=data[1], somedata=2),
                Stat(data=data[2], somedata=3),
                Stat(data=data[3], somedata=4),
                Stat(data=data[4], somedata=5),
                Stat(data=data[0], somedata=6),
                Stat(data=data[1], somedata=7),
                Stat(data=data[2], somedata=8),
                Stat(data=data[3], somedata=9),
                Stat(data=data[4], somedata=10),
            )
        )
        session.flush()

        # aggregate: max(somedata) per data_id
        arb_data = sa.select(
            [stats.c.data_id, sa.func.max(stats.c.somedata).label("max")],
            stats.c.data_id <= 5,
            group_by=[stats.c.data_id],
        )

        arb_result = arb_data.execute().fetchall()

        # order the result list descending based on 'max'
        arb_result.sort(key=lambda a: a._mapping["max"], reverse=True)

        # extract just the "data_id" from it
        arb_result = [row._mapping["data_id"] for row in arb_result]

        arb_data = arb_data.alias("arb")

        # now query for Data objects using that above select, adding the
        # "order by max desc" separately
        q = (
            session.query(Data)
            .options(sa.orm.joinedload("foo"))
            .select_from(
                datas.join(arb_data, arb_data.c.data_id == datas.c.id)
            )
            .order_by(sa.desc(arb_data.c.max))
            .limit(10)
        )

        # extract "data_id" from the list of result objects
        verify_result = [d.id for d in q]

        eq_(verify_result, arb_result)
class EagerTest4(fixtures.MappedTest):
    """Joined eager loading combined with join(), distinct() and count()."""

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "departments",
            metadata,
            Column(
                "department_id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("name", String(50)),
        )

        Table(
            "employees",
            metadata,
            Column(
                "person_id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("name", String(50)),
            Column(
                "department_id",
                Integer,
                ForeignKey("departments.department_id"),
            ),
        )

    @classmethod
    def setup_classes(cls):
        class Department(cls.Basic):
            pass

        class Employee(cls.Basic):
            pass

    def test_basic(self):
        Department, Employee, employees, departments = (
            self.classes.Department,
            self.classes.Employee,
            self.tables.employees,
            self.tables.departments,
        )

        mapper(Employee, employees)
        mapper(
            Department,
            departments,
            properties=dict(
                employees=relationship(
                    Employee, lazy="joined", backref="department"
                )
            ),
        )

        d1 = Department(name="One")
        for e in "Jim", "Jack", "John", "Susan":
            d1.employees.append(Employee(name=e))

        d2 = Department(name="Two")
        for e in "Joe", "Bob", "Mary", "Wally":
            d2.employees.append(Employee(name=e))

        sess = create_session()
        sess.add_all((d1, d2))
        sess.flush()

        # departments having an employee whose name starts with "J",
        # ordered by name descending ("Two" before "One")
        q = (
            sess.query(Department)
            .join("employees")
            .filter(Employee.name.startswith("J"))
            .distinct()
            .order_by(sa.desc(Department.name))
        )

        eq_(q.count(), 2)
        assert q[0] is d2
class EagerTest5(fixtures.MappedTest):
    """Construction of AliasedClauses for the same eager load property but
    different parent mappers, due to inheritance."""

    @classmethod
    def define_tables(cls, metadata):
        # joined-table inheritance: "base" with two subclass tables, plus a
        # one-to-many "comments" collection hanging off the base
        Table(
            "base",
            metadata,
            Column("uid", String(30), primary_key=True),
            Column("x", String(30)),
        )
        Table(
            "derived",
            metadata,
            Column(
                "uid", String(30), ForeignKey("base.uid"), primary_key=True
            ),
            Column("y", String(30)),
        )
        Table(
            "derivedII",
            metadata,
            Column(
                "uid", String(30), ForeignKey("base.uid"), primary_key=True
            ),
            Column("z", String(30)),
        )
        Table(
            "comments",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("uid", String(30), ForeignKey("base.uid")),
            Column("comment", String(30)),
        )

    @classmethod
    def setup_classes(cls):
        class Base(cls.Basic):
            def __init__(self, uid, x):
                self.uid = uid
                self.x = x

        class Derived(Base):
            def __init__(self, uid, x, y):
                self.uid = uid
                self.x = x
                self.y = y

        class DerivedII(Base):
            def __init__(self, uid, x, z):
                self.uid = uid
                self.x = x
                self.z = z

        class Comment(cls.Basic):
            def __init__(self, uid, comment):
                self.uid = uid
                self.comment = comment

    def test_basic(self):
        (
            Comment,
            Derived,
            derived,
            comments,
            DerivedII,
            Base,
            base,
            derivedII,
        ) = (
            self.classes.Comment,
            self.classes.Derived,
            self.tables.derived,
            self.tables.comments,
            self.classes.DerivedII,
            self.classes.Base,
            self.tables.base,
            self.tables.derivedII,
        )

        mapper(Comment, comments)
        baseMapper = mapper(
            Base,
            base,
            properties=dict(
                comments=relationship(
                    Comment, lazy="joined", cascade="all, delete-orphan"
                )
            ),
        )
        mapper(Derived, derived, inherits=baseMapper)
        mapper(DerivedII, derivedII, inherits=baseMapper)

        sess = create_session()
        d = Derived("uid1", "x", "y")
        d.comments = [Comment("uid1", "comment")]
        d2 = DerivedII("uid2", "xx", "z")
        d2.comments = [Comment("uid2", "comment")]
        sess.add_all((d, d2))
        sess.flush()
        sess.expunge_all()

        # this eager load sets up an AliasedClauses for the "comment"
        # relationship, then stores it in clauses_by_lead_mapper[mapper for
        # Derived]
        d = sess.query(Derived).get("uid1")
        sess.expunge_all()
        assert len([c for c in d.comments]) == 1

        # this eager load sets up an AliasedClauses for the "comment"
        # relationship, and should store it in clauses_by_lead_mapper[mapper
        # for DerivedII]. the bug was that the previous AliasedClause create
        # prevented this population from occurring.
        d2 = sess.query(DerivedII).get("uid2")
        sess.expunge_all()

        # object is not in the session; therefore the lazy load cant trigger
        # here, eager load had to succeed
        assert len([c for c in d2.comments]) == 1
class EagerTest6(fixtures.MappedTest):
    """Chained joined-eager relationships, some installed after the initial
    mapper() calls via add_property()."""

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "design_types",
            metadata,
            Column(
                "design_type_id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
        )
        Table(
            "design",
            metadata,
            Column(
                "design_id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column(
                "design_type_id",
                Integer,
                ForeignKey("design_types.design_type_id"),
            ),
        )
        Table(
            "parts",
            metadata,
            Column(
                "part_id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("design_id", Integer, ForeignKey("design.design_id")),
            Column(
                "design_type_id",
                Integer,
                ForeignKey("design_types.design_type_id"),
            ),
        )
        Table(
            "inherited_part",
            metadata,
            Column(
                "ip_id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("part_id", Integer, ForeignKey("parts.part_id")),
            Column("design_id", Integer, ForeignKey("design.design_id")),
        )

    @classmethod
    def setup_classes(cls):
        class Part(cls.Basic):
            pass

        class Design(cls.Basic):
            pass

        class DesignType(cls.Basic):
            pass

        class InheritedPart(cls.Basic):
            pass

    def test_one(self):
        (
            Part,
            inherited_part,
            design_types,
            DesignType,
            parts,
            design,
            Design,
            InheritedPart,
        ) = (
            self.classes.Part,
            self.tables.inherited_part,
            self.tables.design_types,
            self.classes.DesignType,
            self.tables.parts,
            self.tables.design,
            self.classes.Design,
            self.classes.InheritedPart,
        )

        p_m = mapper(Part, parts)
        mapper(
            InheritedPart,
            inherited_part,
            properties=dict(part=relationship(Part, lazy="joined")),
        )
        d_m = mapper(
            Design,
            design,
            properties=dict(
                inheritedParts=relationship(
                    InheritedPart,
                    cascade="all, delete-orphan",
                    backref="design",
                )
            ),
        )
        mapper(DesignType, design_types)

        # relationships installed after the mappers already exist -- this
        # late configuration is part of what's under test
        d_m.add_property(
            "type", relationship(DesignType, lazy="joined", backref="designs")
        )
        p_m.add_property(
            "design",
            relationship(
                Design,
                lazy="joined",
                backref=backref("parts", cascade="all, delete-orphan"),
            ),
        )

        d = Design()
        sess = create_session()
        sess.add(d)
        sess.flush()
        sess.expunge_all()
        x = sess.query(Design).get(1)
        # touching the collection must not fail under the chained
        # eager-load configuration
        x.inheritedParts
class EagerTest7(fixtures.MappedTest):
    """Eager load of a many-to-one (Invoice.company) that itself carries an
    eagerly-loaded one-to-many (Company.addresses)."""

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "companies",
            metadata,
            Column(
                "company_id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("company_name", String(40)),
        )
        Table(
            "addresses",
            metadata,
            Column(
                "address_id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("company_id", Integer, ForeignKey("companies.company_id")),
            Column("address", String(40)),
        )
        Table(
            "phone_numbers",
            metadata,
            Column(
                "phone_id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("address_id", Integer, ForeignKey("addresses.address_id")),
            Column("type", String(20)),
            Column("number", String(10)),
        )
        Table(
            "invoices",
            metadata,
            Column(
                "invoice_id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("company_id", Integer, ForeignKey("companies.company_id")),
            Column("date", sa.DateTime),
        )

    @classmethod
    def setup_classes(cls):
        class Company(cls.Comparable):
            pass

        class Address(cls.Comparable):
            pass

        class Phone(cls.Comparable):
            pass

        class Invoice(cls.Comparable):
            pass

    def test_load_m2o_attached_to_o2(self):
        """
        Tests eager load of a many-to-one attached to a one-to-many. this
        testcase illustrated the bug, which is that when the single Company is
        loaded, no further processing of the rows occurred in order to load
        the Company's second Address object.
        """

        addresses, invoices, Company, companies, Invoice, Address = (
            self.tables.addresses,
            self.tables.invoices,
            self.classes.Company,
            self.tables.companies,
            self.classes.Invoice,
            self.classes.Address,
        )

        mapper(Address, addresses)
        mapper(
            Company,
            companies,
            properties={"addresses": relationship(Address, lazy="joined")},
        )
        mapper(
            Invoice,
            invoices,
            properties={"company": relationship(Company, lazy="joined")},
        )

        a1 = Address(address="a1 address")
        a2 = Address(address="a2 address")
        c1 = Company(company_name="company 1", addresses=[a1, a2])
        i1 = Invoice(date=datetime.datetime.now(), company=c1)

        session = create_session()
        session.add(i1)
        session.flush()

        company_id = c1.company_id
        invoice_id = i1.invoice_id
        session.expunge_all()

        c = session.query(Company).get(company_id)
        session.expunge_all()
        i = session.query(Invoice).get(invoice_id)

        # both loads above populated the full address collections, so the
        # comparisons below must emit zero SQL
        def go():
            eq_(c, i.company)
            eq_(c.addresses, i.company.addresses)

        self.assert_sql_count(testing.db, go, 0)
class EagerTest8(fixtures.MappedTest):
    """Joined eager load against a mapper whose selectable is a join of a
    table to an aggregating subquery."""

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "prj",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("created", sa.DateTime),
            Column("title", sa.String(100)),
        )
        Table(
            "task",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column(
                "status_id",
                Integer,
                ForeignKey("task_status.id"),
                nullable=False,
            ),
            Column("title", sa.String(100)),
            Column(
                "task_type_id",
                Integer,
                ForeignKey("task_type.id"),
                nullable=False,
            ),
            Column("prj_id", Integer, ForeignKey("prj.id"), nullable=False),
        )
        Table(
            "task_status",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
        )
        Table(
            "task_type",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
        )
        Table(
            "msg",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("posted", sa.DateTime, index=True),
            Column("type_id", Integer, ForeignKey("msg_type.id")),
            Column("task_id", Integer, ForeignKey("task.id")),
        )
        Table(
            "msg_type",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("name", sa.String(20)),
            Column("display_name", sa.String(20)),
        )

    @classmethod
    def fixtures(cls):
        # one project / status / type and a single task; no messages, so
        # the expected aggregate count below is 0
        return dict(
            prj=(("id",), (1,)),
            task_status=(("id",), (1,)),
            task_type=(("id",), (1,)),
            task=(
                ("title", "task_type_id", "status_id", "prj_id"),
                ("task 1", 1, 1, 1),
            ),
        )

    @classmethod
    def setup_classes(cls):
        class Task_Type(cls.Comparable):
            pass

        class Joined(cls.Comparable):
            pass

    def test_nested_joins(self):
        task, Task_Type, Joined, task_type, msg = (
            self.tables.task,
            self.classes.Task_Type,
            self.classes.Joined,
            self.tables.task_type,
            self.tables.msg,
        )

        # this is testing some subtle column resolution stuff,
        # concerning corresponding_column() being extremely accurate
        # as well as how mapper sets up its column properties
        mapper(Task_Type, task_type)

        # task LEFT OUTER JOIN msg, aggregated into a per-task message
        # count, then joined back to task as the mapped selectable
        j = sa.outerjoin(task, msg, task.c.id == msg.c.task_id)
        jj = sa.select(
            [
                task.c.id.label("task_id"),
                sa.func.count(msg.c.id).label("props_cnt"),
            ],
            from_obj=[j],
            group_by=[task.c.id],
        ).alias("prop_c_s")
        jjj = sa.join(task, jj, task.c.id == jj.c.task_id)

        mapper(
            Joined,
            jjj,
            properties=dict(type=relationship(Task_Type, lazy="joined")),
        )

        session = create_session()

        eq_(
            session.query(Joined).limit(10).offset(0).one(),
            Joined(id=1, title="task 1", props_cnt=0),
        )
class EagerTest9(fixtures.MappedTest):
    """Test the usage of query options to eagerly load specific paths.

    This relies upon the 'path' construct used by PropertyOption to relate
    LoaderStrategies to specific paths, as well as the path state maintained
    throughout the query setup/mapper instances process.
    """

    @classmethod
    def define_tables(cls, metadata):
        # entries is an association between accounts and transactions
        Table(
            "accounts",
            metadata,
            Column(
                "account_id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("name", String(40)),
        )
        Table(
            "transactions",
            metadata,
            Column(
                "transaction_id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("name", String(40)),
        )
        Table(
            "entries",
            metadata,
            Column(
                "entry_id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("name", String(40)),
            Column("account_id", Integer, ForeignKey("accounts.account_id")),
            Column(
                "transaction_id",
                Integer,
                ForeignKey("transactions.transaction_id"),
            ),
        )

    @classmethod
    def setup_classes(cls):
        class Account(cls.Basic):
            pass

        class Transaction(cls.Basic):
            pass

        class Entry(cls.Basic):
            pass

    @classmethod
    def setup_mappers(cls):
        Account, Transaction, transactions, accounts, entries, Entry = (
            cls.classes.Account,
            cls.classes.Transaction,
            cls.tables.transactions,
            cls.tables.accounts,
            cls.tables.entries,
            cls.classes.Entry,
        )

        mapper(Account, accounts)
        mapper(Transaction, transactions)

        # note the asymmetric default strategies on the two backrefs:
        # Account.entries is lazy ("select"), Transaction.entries is
        # joined-eager -- the query option below overrides the former
        mapper(
            Entry,
            entries,
            properties=dict(
                account=relationship(
                    Account,
                    uselist=False,
                    backref=backref(
                        "entries", lazy="select", order_by=entries.c.entry_id
                    ),
                ),
                transaction=relationship(
                    Transaction,
                    uselist=False,
                    backref=backref(
                        "entries", lazy="joined", order_by=entries.c.entry_id
                    ),
                ),
            ),
        )

    def test_joinedload_on_path(self):
        Entry, Account, Transaction = (
            self.classes.Entry,
            self.classes.Account,
            self.classes.Transaction,
        )

        session = create_session()

        tx1 = Transaction(name="tx1")
        tx2 = Transaction(name="tx2")

        acc1 = Account(name="acc1")
        Entry(name="ent11", account=acc1, transaction=tx1)
        Entry(name="ent12", account=acc1, transaction=tx2)

        acc2 = Account(name="acc2")
        Entry(name="ent21", account=acc2, transaction=tx1)
        Entry(name="ent22", account=acc2, transaction=tx2)

        session.add(acc1)
        session.flush()
        session.expunge_all()

        def go():
            # load just the first Account. eager loading will actually load
            # all objects saved thus far, but will not eagerly load the
            # "accounts" off the immediate "entries"; only the "accounts" off
            # the entries->transaction->entries
            acc = (
                session.query(Account)
                .options(
                    sa.orm.joinedload("entries")
                    .joinedload("transaction")
                    .joinedload("entries")
                    .joinedload("account")
                )
                .order_by(Account.account_id)
            ).first()

            # no sql occurs
            eq_(acc.name, "acc1")
            eq_(acc.entries[0].transaction.entries[0].account.name, "acc1")
            eq_(acc.entries[0].transaction.entries[1].account.name, "acc2")

            # lazyload triggers but no sql occurs because many-to-one uses
            # cached query.get()
            for e in acc.entries:
                assert e.account is acc

        self.assert_sql_count(testing.db, go, 1)
|
|
import urllib
import urllib2
import datetime
from collections import namedtuple
from django.conf import settings
from django.template.loader import render_to_string
from billing.models import AuthorizeAIMResponse
from billing import Gateway, GatewayNotConfigured
from billing.signals import *
from billing.utils.credit_card import InvalidCard, Visa, \
MasterCard, Discover, AmericanExpress
from billing.utils.xml_parser import parseString, nodeToDic
# Authorize.Net AIM protocol version sent as x_version
API_VERSION = '3.1'

# delimited-response formatting we request from the gateway:
# fields separated by DELIM_CHAR, each wrapped in ENCAP_CHAR
DELIM_CHAR = ','
ENCAP_CHAR = '$'

# documented x_response_code values for an AIM transaction
APPROVED, DECLINED, ERROR, FRAUD_REVIEW = 1, 2, 3, 4

# positions of interest within the delimited response field list
RESPONSE_CODE, RESPONSE_REASON_CODE, RESPONSE_REASON_TEXT = 0, 2, 3

# stand-in response object used when the HTTP request itself fails
# (mirrors the first three attributes of AuthorizeAIMResponse)
MockAuthorizeAIMResponse = namedtuple(
    'AuthorizeAIMResponse', [
        'response_code',
        'response_reason_code',
        'response_reason_text'
    ]
)
def save_authorize_response(response):
    """Persist a parsed Authorize.Net AIM delimited response.

    ``response`` is the list of field strings split out of the gateway
    reply; selected positions are copied into an AuthorizeAIMResponse
    row, which is created and returned.
    """
    # (model field name, position in the delimited response); positions
    # 32-37 of the reply are not stored by this model
    field_positions = (
        ('response_reason_code', 2),
        ('response_reason_text', 3),
        ('authorization_code', 4),
        ('address_verification_response', 5),
        ('transaction_id', 6),
        ('invoice_number', 7),
        ('description', 8),
        ('amount', 9),
        ('method', 10),
        ('transaction_type', 11),
        ('customer_id', 12),
        ('first_name', 13),
        ('last_name', 14),
        ('company', 15),
        ('address', 16),
        ('city', 17),
        ('state', 18),
        ('zip_code', 19),
        ('country', 20),
        ('phone', 21),
        ('fax', 22),
        ('email', 23),
        ('shipping_first_name', 24),
        ('shipping_last_name', 25),
        ('shipping_company', 26),
        ('shipping_address', 27),
        ('shipping_city', 28),
        ('shipping_state', 29),
        ('shipping_zip_code', 30),
        ('shipping_country', 31),
        ('card_code_response', 38),
    )
    data = dict((name, response[idx]) for name, idx in field_positions)
    # the overall result code is stored numerically
    data['response_code'] = int(response[0])
    return AuthorizeAIMResponse.objects.create(**data)
class AuthorizeNetGateway(Gateway):
    """Authorize.Net payment gateway.

    One-off card transactions go through the legacy AIM (Advanced
    Integration Method) form-POST endpoint; recurring payments go through
    the ARB (Automated Recurring Billing) XML API.  Python 2 era code
    (urllib/urllib2, dict.iteritems).
    """

    # AIM (single card transaction) endpoints
    test_url = "https://test.authorize.net/gateway/transact.dll"
    live_url = "https://secure.authorize.net/gateway/transact.dll"

    # ARB (recurring billing) XML endpoints
    arb_test_url = 'https://apitest.authorize.net/xml/v1/request.api'
    arb_live_url = 'https://api.authorize.net/xml/v1/request.api'

    supported_countries = ["US"]
    default_currency = "USD"
    supported_cardtypes = [Visa, MasterCard, AmericanExpress, Discover]
    homepage_url = "http://www.authorize.net/"
    display_name = "Authorize.Net"

    def __init__(self):
        # Credentials come from settings.MERCHANT_SETTINGS["authorize_net"];
        # fail fast if the Django settings are missing or incomplete.
        merchant_settings = getattr(settings, "MERCHANT_SETTINGS")
        if not merchant_settings or not merchant_settings.get("authorize_net"):
            raise GatewayNotConfigured("The '%s' gateway is not correctly "
                                       "configured." % self.display_name)
        authorize_net_settings = merchant_settings["authorize_net"]
        self.login = authorize_net_settings["LOGIN_ID"]
        self.password = authorize_net_settings["TRANSACTION_KEY"]

    def add_invoice(self, post, options):
        """add invoice details to the request parameters"""
        post['invoice_num'] = options.get('order_id', None)
        post['description'] = options.get('description', None)

    def add_creditcard(self, post, credit_card):
        """add credit card details to the request parameters"""
        post['card_num'] = credit_card.number
        post['card_code'] = credit_card.verification_value
        post['exp_date'] = credit_card.expire_date
        post['first_name'] = credit_card.first_name
        post['last_name'] = credit_card.last_name

    def add_address(self, post, options):
        """add billing/shipping address details to the request parameters"""
        if options.get('billing_address', None):
            address = options.get('billing_address')
            # NOTE(review): address1/address2 are concatenated with no
            # separator -- confirm this matches what callers expect
            post['address'] = address.get('address1', '') + \
                address.get('address2', '')
            post['company'] = address.get('company', '')
            post['phone'] = address.get('phone', '')
            post['zip'] = address.get('zip', '')
            post['city'] = address.get('city', '')
            post['country'] = address.get('country', '')
            post['state'] = address.get('state', '')

        if options.get('shipping_address', None):
            address = options.get('shipping_address')
            # split the single "name" field into first/last on whitespace;
            # everything after the first word becomes the last name
            post['ship_to_first_name'] = address.get('name', '').split(" ")[0]
            post['ship_to_last_name'] = " ".join(address.get('name', '').split(" ")[1:])
            post['ship_to_address'] = address.get('address1', '') + \
                address.get('address2', '')
            post['ship_to_company'] = address.get('company', '')
            post['ship_to_phone'] = address.get('phone', '')
            post['ship_to_zip'] = address.get('zip', '')
            post['ship_to_city'] = address.get('city', '')
            post['ship_to_country'] = address.get('country', '')
            post['ship_to_state'] = address.get('state', '')

    def add_customer_data(self, post, options):
        """add customer details to the request parameters"""
        if 'email' in options:
            post['email'] = options['email']
            # gateway emails a receipt unless explicitly disabled
            post['email_customer'] = bool(options.get('email_customer', True))

        if 'customer' in options:
            post['cust_id'] = options['customer']

        if 'ip' in options:
            post['customer_ip'] = options['ip']

    @property
    def service_url(self):
        # AIM endpoint selected by the gateway's test_mode flag
        if self.test_mode:
            return self.test_url
        return self.live_url

    def commit(self, action, money, parameters):
        """Assemble and send a single AIM transaction of the given action
        type; returns the stored response object (or a
        MockAuthorizeAIMResponse when the HTTP request fails)."""
        if not action == 'VOID':
            # VOID is the only transaction type that carries no amount
            parameters['amount'] = money

        parameters['test_request'] = self.test_mode
        url = self.service_url
        data = self.post_data(action, parameters)
        response = self.request(url, data)
        return response

    def post_data(self, action, parameters=None):
        """add API details, gateway response formatting options
        to the request parameters"""
        if not parameters:
            parameters = {}
        post = {}

        # fixed protocol/auth fields plus the requested delimited-response
        # formatting (see DELIM_CHAR/ENCAP_CHAR at module level)
        post['version'] = API_VERSION
        post['login'] = self.login
        post['tran_key'] = self.password
        post['relay_response'] = "FALSE"
        post['type'] = action
        post['delim_data'] = "TRUE"
        post['delim_char'] = DELIM_CHAR
        post['encap_char'] = ENCAP_CHAR

        post.update(parameters)
        # every AIM parameter is prefixed with "x_" on the wire
        return urllib.urlencode(dict(('x_%s' % (k), v) for k, v in post.iteritems()))

    # this should be moved to a requests lib file
    def request(self, url, data, headers=None):
        """Make POST request to the payment gateway with the data and return
        gateway RESPONSE_CODE, RESPONSE_REASON_CODE, RESPONSE_REASON_TEXT"""
        if not headers:
            headers = {}

        conn = urllib2.Request(url=url, data=data, headers=headers)
        try:
            open_conn = urllib2.urlopen(conn)
            response = open_conn.read()
        except urllib2.URLError as e:
            # network failure: synthesize an error-shaped response tuple
            return MockAuthorizeAIMResponse(5, '1', str(e))

        # strip the outer ENCAP_CHARs, then split on "$,$" to recover the
        # individual response fields
        fields = response[1:-1].split('%s%s%s' % (ENCAP_CHAR, DELIM_CHAR, ENCAP_CHAR))
        return save_authorize_response(fields)

    def purchase(self, money, credit_card, options=None):
        """Using Authorize.net payment gateway, charge the given
        credit card for specified money"""
        if not options:
            options = {}
        if not self.validate_card(credit_card):
            raise InvalidCard("Invalid Card")

        post = {}
        self.add_invoice(post, options)
        self.add_creditcard(post, credit_card)
        self.add_address(post, options)
        self.add_customer_data(post, options)

        response = self.commit("AUTH_CAPTURE", money, post)
        status = "SUCCESS"
        # response_code 1 == APPROVED; anything else is a failure
        if response.response_code != 1:
            status = "FAILURE"
            transaction_was_unsuccessful.send(sender=self,
                                              type="purchase",
                                              response=response)
        else:
            transaction_was_successful.send(sender=self,
                                            type="purchase",
                                            response=response)
        return {"status": status, "response": response}

    def authorize(self, money, credit_card, options=None):
        """Using Authorize.net payment gateway, authorize the
        credit card for specified money"""
        if not options:
            options = {}
        if not self.validate_card(credit_card):
            raise InvalidCard("Invalid Card")

        post = {}
        self.add_invoice(post, options)
        self.add_creditcard(post, credit_card)
        self.add_address(post, options)
        self.add_customer_data(post, options)

        response = self.commit("AUTH_ONLY", money, post)
        status = "SUCCESS"
        if response.response_code != 1:
            status = "FAILURE"
            transaction_was_unsuccessful.send(sender=self,
                                              type="authorization",
                                              response=response)
        else:
            transaction_was_successful.send(sender=self,
                                            type="authorization",
                                            response=response)
        return {"status": status, "response": response}

    def capture(self, money, authorization, options=None):
        """Using Authorize.net payment gateway, capture the
        authorize credit card"""
        if not options:
            options = {}

        post = {}
        # authorization is the transaction id returned by authorize()
        post["trans_id"] = authorization
        post.update(options)

        response = self.commit("PRIOR_AUTH_CAPTURE", money, post)
        status = "SUCCESS"
        if response.response_code != 1:
            status = "FAILURE"
            transaction_was_unsuccessful.send(sender=self,
                                              type="capture",
                                              response=response)
        else:
            transaction_was_successful.send(sender=self,
                                            type="capture",
                                            response=response)
        return {"status": status, "response": response}

    def void(self, identification, options=None):
        """Using Authorize.net payment gateway, void the
        specified transaction"""
        if not options:
            options = {}

        post = {}
        post["trans_id"] = identification
        post.update(options)

        # commit ignores the money argument for void, so we set it None
        response = self.commit("VOID", None, post)
        status = "SUCCESS"
        if response.response_code != 1:
            status = "FAILURE"
            transaction_was_unsuccessful.send(sender=self,
                                              type="void",
                                              response=response)
        else:
            transaction_was_successful.send(sender=self,
                                            type="void",
                                            response=response)
        return {"status": status, "response": response}

    def credit(self, money, identification, options=None):
        """Using Authorize.net payment gateway, refund money from the
        specified (settled) transaction back to the card"""
        if not options:
            options = {}

        post = {}
        post["trans_id"] = identification
        # Authorize.Net requires the card or the last 4 digits be sent
        post["card_num"] = options["credit_card"]
        post.update(options)

        response = self.commit("CREDIT", money, post)
        status = "SUCCESS"
        if response.response_code != 1:
            status = "FAILURE"
            transaction_was_unsuccessful.send(sender=self,
                                              type="credit",
                                              response=response)
        else:
            transaction_was_successful.send(sender=self,
                                            type="credit",
                                            response=response)
        return {"status": status, "response": response}

    def recurring(self, money, credit_card, options):
        """Create an ARB subscription billing the card `money` per
        interval, via the ARB XML API (not AIM)."""
        if not options:
            options = {}
        if not self.validate_card(credit_card):
            raise InvalidCard("Invalid Card")

        # subscription parameters are rendered into the ARB request XML
        template_vars = {}
        template_vars['auth_login'] = self.login
        template_vars['auth_key'] = self.password
        template_vars['amount'] = money
        template_vars['card_number'] = credit_card.number
        template_vars['exp_date'] = credit_card.expire_date

        template_vars['start_date'] = options.get('start_date') or datetime.date.today().strftime("%Y-%m-%d")
        # 9999 is Authorize.Net's sentinel for "no end date"
        template_vars['total_occurrences'] = options.get('total_occurences', 9999)
        template_vars['interval_length'] = options.get('interval_length', 1)
        template_vars['interval_unit'] = options.get('interval_unit', 'months')
        template_vars['sub_name'] = options.get('sub_name', '')
        template_vars['first_name'] = credit_card.first_name
        template_vars['last_name'] = credit_card.last_name

        xml = render_to_string('billing/arb/arb_create_subscription.xml', template_vars)

        if self.test_mode:
            url = self.arb_test_url
        else:
            url = self.arb_live_url
        headers = {'content-type': 'text/xml'}

        conn = urllib2.Request(url=url, data=xml, headers=headers)
        try:
            open_conn = urllib2.urlopen(conn)
            xml_response = open_conn.read()
        except urllib2.URLError as e:
            return MockAuthorizeAIMResponse(5, '1', str(e))

        response = nodeToDic(parseString(xml_response))['ARBCreateSubscriptionResponse']
        # successful response
        # {u'ARBCreateSubscriptionResponse': {u'messages': {u'message': {u'code': u'I00001',
        #                                                                u'text': u'Successful.'},
        #                                                   u'resultCode': u'Ok'},
        #                                     u'subscriptionId': u'933728'}}

        status = "SUCCESS"
        if response['messages']['resultCode'].lower() != 'ok':
            status = "FAILURE"
            transaction_was_unsuccessful.send(sender=self,
                                              type="recurring",
                                              response=response)
        else:
            transaction_was_successful.send(sender=self,
                                            type="recurring",
                                            response=response)
        return {"status": status, "response": response}

    def store(self, creditcard, options=None):
        # CIM-style card storage is not supported by this gateway
        raise NotImplementedError

    def unstore(self, identification, options=None):
        raise NotImplementedError
|
|
#!/usr/bin/env python
import os
import sys
import argparse
from collections import OrderedDict, defaultdict
def sanitize(n):
    """Lower-case *n* and map spaces and dots to underscores so the result
    can be used as an HTML class/id fragment."""
    result = n.lower()
    for ch in (" ", "."):
        result = result.replace(ch, "_")
    return result
class table(object):
    """Parse a tab-separated text blob into header / row names / data and
    render it as an HTML <table> (heat-mapped client-side by the JS)."""

    def __init__(self, lines, has_header=True, has_row_name=True):
        self.lines = lines
        self.has_header = has_header      # first line holds column names
        self.has_row_name = has_row_name  # first column holds row names
        self.header = []
        self.rows = []
        self.data = []
        self.process()

    def process(self):
        """Split self.lines on newlines/tabs, filling header, rows, data."""
        for line_num, line in enumerate(self.lines.split("\n")):
            cols = [l.strip() for l in line.split("\t")]
            if line_num == 0:
                if self.has_header:
                    self.header = cols
                elif self.has_row_name:
                    self.rows.append(cols[0])
                    self.data.append(cols[1:])
                else:
                    if len(cols) > 0 and sum([len(c) for c in cols]) > 0:
                        self.data.append(cols)
            else:
                # skip blank lines (all cells empty)
                if len(cols) > 0 and sum([len(c) for c in cols]) > 0:
                    if self.has_row_name:
                        self.rows.append(cols[0])
                        self.data.append(cols[1:])
                    else:
                        self.data.append(cols)

    def to_html(self, tb_id, tb_class, decimals=1, colored=False, no_diag=False):
        """Render the parsed table as an HTML string.

        tb_id    -- id attribute of the <table>
        tb_class -- base CSS class (sanitized before use)
        decimals -- decimal places used when formatting float cells
        colored  -- add the "colored" class so the JS heat-maps the cells
        no_diag  -- blank the main diagonal (for self-vs-self matrices)
        """
        tb_class = sanitize(tb_class)

        if no_diag:
            # blank the diagonal so it does not skew the heat-map range
            for a in range(len(self.data)):
                self.data[a][a] = None

        # min/max over all numeric cells, consumed by the JS heat-map.
        # BUGFIX: skip rows with no numeric cells (e.g. empty data rows),
        # which previously made min()/max() raise ValueError.
        per_row = [[float(e) for e in d if e is not None] for d in self.data]
        per_row = [vals for vals in per_row if vals]
        min_val = min([min(vals) for vals in per_row]) if per_row else 0
        max_val = max([max(vals) for vals in per_row]) if per_row else 0
        print("min_val {} max_val {}".format(min_val, max_val))

        res = ['    <table id="{}" class="table_header {}" min_val="{}" max_val="{}">'.format(tb_id, tb_class + (" colored" if colored else ""), min_val, max_val)]

        if self.has_header:
            res.append( '        <tr row_num="-1" row_name="_HEADER_" class="header_row {tb_class}_header">'.format(**{
                'tb_class' : tb_class ,
            }) )
            for col_num, col_name in enumerate(self.header):
                res.append( '            <th col_num="{col_num}" col_name="{col_name}" class="header_cell {tb_class}_header {tb_class}_th"><div><span>{col_name}</span></div></th>'.format(**{
                    'col_num'  : col_num ,
                    'col_name' : col_name ,
                    'tb_class' : tb_class ,
                }) )
            res.append( '        </tr>' )

        for row_num, row in enumerate(self.data):
            row_name = row_num
            if len(row) == 0:
                continue
            res.append( '        <tr row_num="{row_num}" row_name="{row_name}" class="table_row {tb_class}_row">'.format(**{
                'row_num'  : row_num ,
                'row_name' : row_name ,
                'tb_class' : tb_class ,
            }) )
            for col_num, col_val in enumerate(row):
                if self.has_row_name and col_num == 0:
                    row_name = self.rows[row_num]
                    res.append( '            <th col_num="{col_num}" col_name="{col_name}" row_num="{row_num}" row_name="{row_name}" class="row_name {tb_class}_row_name {tb_class}_th">{row_name}</th>'.format(**{
                        'col_num'  : col_num ,
                        'col_name' : self.header[col_num] if self.has_header else col_num,
                        'row_num'  : row_num ,
                        'row_name' : row_name ,
                        'tb_class' : tb_class
                    }) )
                if self.has_row_name:
                    # shift the data column index past the row-name column
                    # so it lines up with the header
                    col_num += 1
                col_name = col_num
                if self.has_header:
                    col_name = self.header[col_num]
                if col_val is None:
                    col_val = "-"
                else:
                    # values containing '.' are floats, otherwise ints;
                    # both get thousands separators
                    if '.' in col_val:
                        col_val = ("{:,."+str(decimals)+"f}").format(float(col_val))
                    else:
                        col_val = "{:,d}".format(int(col_val))
                # BUGFIX: close the data cell with </td> (was </th>); use
                # the computed col_name fallback instead of indexing a
                # possibly-empty header; str() the row name, which is an
                # int when has_row_name is False, before sanitizing it.
                res.append( '            <td col_num="{col_num}" col_name="{col_name}" row_num="{row_num}" row_name="{row_name}" class="cell {tb_class}_td">{col_val}</td>'.format(**{
                    'col_num'   : col_num ,
                    'col_name'  : col_name ,
                    'row_num'   : row_num ,
                    'row_name'  : row_name ,
                    'row_name_s': sanitize(str(row_name)) ,
                    'tb_class'  : tb_class ,
                    'col_val'   : col_val
                }) )
            res.append( '        </tr>' )
        res.append( '    </table>' )
        return "\n".join(res)
def main():
    # usage: script.py <basename> -- expects the four derived input files
    # produced by the upstream pipeline to sit next to <basename>.json
    basename = sys.argv[1]

    f_json, f_stats, f_count, f_matrix = [x.format(basename) for x in ("{}.json","{}.json.count.csv","{}.json.csv","{}.json.jaccard.matrix")]

    for f in (f_json, f_stats, f_count, f_matrix):
        if not os.path.exists(f):
            print "input file {} does not exists".format(f)
            sys.exit(1)

    json = open(f_json , 'r').read()
    stats = open(f_stats , 'r').read()
    count = open(f_count , 'r').read()
    matrix = open(f_matrix, 'r').read()

    # render the three TSV inputs to HTML; count and matrix tables are
    # heat-mapped and have their diagonal blanked
    stats_t = table(stats , has_header=True, has_row_name=True ).to_html( 'stats_table' , 'stats_table' , decimals=1, colored=False, no_diag=False )
    count_t = table(count , has_header=True, has_row_name=True ).to_html( 'count_table' , 'count_table' , decimals=1, colored=True , no_diag=True  )
    matrix_t = table(matrix, has_header=True, has_row_name=True ).to_html( 'matrix_table', 'matrix_table', decimals=5, colored=True , no_diag=True  )

    # inline the JS/CSS so the output is a single self-contained page
    res = TEMPLATE.format(**{
        "title" : basename,
        "json"  : ''      , #json    ,
        "count" : count_t ,
        "stats" : stats_t ,
        "matrix": matrix_t,
        "script": '<script>{}</script>'.format(SCRIPT),
        "css"   : '<style>{}</style>' .format(CSS   ),
        # "script": '<script src="{}.js"></script>'.format(basename),
        # "css"   : '<link rel="stylesheet" href="{}.css">'.format(basename)
    })
    #print res
    open("{}.html".format(basename), 'w').write(res)
TEMPLATE = """<!doctype html>
<html>
<head>
<meta charset="UTF-8">
<title>{title}</title>
{css}
<script src="https://cdn.rawgit.com/gka/chroma.js/master/chroma.min.js"></script>
{script}
</head>
<body>
<script type="application/json" id="json">
{json}
</script>
<div id="stats">
{stats}
</div>
<div id="count">
{count}
</div>
<div id="matrix">
{matrix}
</div>
</body>
</html>
"""
# Client-side JavaScript injected into the page: after DOMContentLoaded it
# colors every <td> inside elements marked class="colored" on a chroma.js
# heat-map scale, normalized between the element's min_val/max_val attributes.
SCRIPT = """
function isNumeric(n) {
//https://stackoverflow.com/questions/18082/validate-decimal-numbers-in-javascript-isnumeric
return !isNaN(parseFloat(n)) && isFinite(n);
}
var bscale = chroma.scale(['yellow' , 'orange', 'red' ]);
var fscale = chroma.scale(['black', 'white']);
function heatMapColorforValue(min_val, max_val, value){
/*
2-3-4-5 = (2 - 2) / (5 - 2) = 0 / 3 = 0
2-3-4-5 = (3 - 2) / (5 - 2) = 1 / 3 = 0.3
2-3-4-5 = (4 - 2) / (5 - 2) = 2 / 3 = 0.6
2-3-4-5 = (5 - 2) / (5 - 2) = 3 / 3 = 1.0
*/
var ival = (value - min_val) / (max_val - min_val);
var bcolor = bscale(ival).hex();
var fcolor = fscale(ival).hex();
return [ival, bcolor, fcolor];
}
function color_cells() {
var coloreds = document.getElementsByClassName("colored");
console.log(coloreds.length, coloreds);
for (var c = 0; c < coloreds.length; c++) {
var e = coloreds[c];
var min_val = parseFloat(e.getAttribute('min_val'));
var max_val = parseFloat(e.getAttribute('max_val'));
console.log("C",c,"E",e,"min",min_val,"max",max_val);
var tds = e.getElementsByTagName("td");
//console.log(" TDS", tds.length, tds);
for ( var t = 0; t < tds.length; t++) {
var td = tds[t];
var value = parseFloat(td.innerHTML.replace(/,/g, ""));
if ( isNumeric(value) ) {
var cdata = heatMapColorforValue(min_val, max_val, value);
var prop = cdata[0];
var bcolor = cdata[1];
var fcolor = cdata[2];
td.style.backgroundColor = bcolor;
//td.style.color = fcolor;
}
}
}
}
document.addEventListener("DOMContentLoaded", color_cells);
"""
# Page stylesheet: rotates the tall table header cells 270 degrees so long
# column names fit, and right-aligns numeric data cells.
CSS = """
th.header_cell {
text-align: left;
height: 500px;
}
th.header_cell > div {
/*float: left;*/
transform:
/*translate(25px, 51px)*/
/*rotate(315deg);*/
translate(15px, 180px)
rotate(270deg);
width: 50px;
/*transform-origin: left top 0;*/
}
th.header_cell > div > span {
/*border-bottom: 1px solid #ccc;*/
padding: 5px 10px;
}
th.row_name {
text-align: left;
}
td.cell {
text-align: right;
/*text-shadow: 1px 1px 1px #000;*/
}
th {
white-space: nowrap;
}
"""
"""
<div id="stats">
<table id="stats_table" class=table_header "stats_table">
<tr row_num="-1" row_name="_HEADER_" class="header_row stats_table_header">
<th col_num="0" col_name="NAME" class="header_cell stats_table_header stats_table_th">NAME</th>
<th col_num="1" col_name="TOTAL" class="header_cell stats_table_header stats_table_th">TOTAL</th>
<th col_num="2" col_name="VALID" class="header_cell stats_table_header stats_table_th">VALID</th>
<th col_num="3" col_name="PROP" class="header_cell stats_table_header stats_table_th">PROP</th>
</tr>
<tr row_num="176" row_name="176" class="table_row stats_table_row">
<th col_num="0" col_name="NAME" row_num="176" row_name="Tribolium castaneum" class="row_name stats_table_row_name stats_table_th">Tribolium castaneum</th>
<td col_num="1" col_name="TOTAL" row_num="176" row_name="Tribolium castaneum" class="cell stats_table_td">6581748</th>
<td col_num="2" col_name="VALID" row_num="176" row_name="Tribolium castaneum" class="cell stats_table_td">1505999</th>
<td col_num="3" col_name="PROP" row_num="176" row_name="Tribolium castaneum" class="cell stats_table_td"> 22.88</th>
</tr>
<tr row_num="177" row_name="177" class="table_row stats_table_row">
</tr>
</table>
</div>
<div id="count">
<table id="count_table" class=table_header "count_table">
<tr row_num="-1" row_name="_HEADER_" class="header_row count_table_header">
<th col_num="0" col_name="" class="header_cell count_table_header count_table_th"></th>
<th col_num="1" col_name="Arabidopsis lyrata" class="header_cell count_table_header count_table_th">Arabidopsis lyrata</th>
<th col_num="2" col_name="Arabidopsis thaliana TAIR10" class="header_cell count_table_header count_table_th">Arabidopsis thaliana TAIR10</th>
<th col_num="3" col_name="Citrus sinensis" class="header_cell count_table_header count_table_th">Citrus sinensis</th>
<th col_num="176" col_name="Struthio camelus australis" class="header_cell count_table_header count_table_th">Struthio camelus australis</th>
<th col_num="177" col_name="Tribolium castaneum" class="header_cell count_table_header count_table_th">Tribolium castaneum</th>
</tr>
<tr row_num="0" row_name="0" class="table_row count_table_row">
<th col_num="0" col_name="" row_num="0" row_name="Arabidopsis lyrata" class="row_name count_table_row_name count_table_th">Arabidopsis lyrata</th>
<td col_num="1" col_name="Arabidopsis lyrata" row_num="0" row_name="Arabidopsis lyrata" class="cell count_table_td">0</th>
<td col_num="176" col_name="Struthio camelus australis" row_num="176" row_name="Tribolium castaneum" class="cell count_table_td">47911</th>
<td col_num="177" col_name="Tribolium castaneum" row_num="176" row_name="Tribolium castaneum" class="cell count_table_td">0</th>
</tr>
<tr row_num="177" row_name="177" class="table_row count_table_row">
</tr>
</table>
</div>
<div id="matrix">
<table id="matrix_table" class=table_header "matrix_table">
<tr row_num="-1" row_name="_HEADER_" class="header_row matrix_table_header">
<th col_num="0" col_name="" class="header_cell matrix_table_header matrix_table_th"></th>
<th col_num="1" col_name="Arabidopsis lyrata" class="header_cell matrix_table_header matrix_table_th">Arabidopsis lyrata</th>
<th col_num="176" col_name="Struthio camelus australis" class="header_cell matrix_table_header matrix_table_th">Struthio camelus australis</th>
<th col_num="177" col_name="Tribolium castaneum" class="header_cell matrix_table_header matrix_table_th">Tribolium castaneum</th>
<th col_num="178" col_name="" class="header_cell matrix_table_header matrix_table_th"></th>
</tr>
<tr row_num="0" row_name="0" class="table_row matrix_table_row">
<th col_num="0" col_name="" row_num="0" row_name="Arabidopsis lyrata" class="row_name matrix_table_row_name matrix_table_th">Arabidopsis lyrata</th>
<td col_num="1" col_name="Arabidopsis lyrata" row_num="0" row_name="Arabidopsis lyrata" class="cell matrix_table_td">1.0000000000</th>
<td col_num="2" col_name="Arabidopsis thaliana TAIR10" row_num="0" row_name="Arabidopsis lyrata" class="cell matrix_table_td">0.7233067471</th>
<td col_num="176" col_name="Struthio camelus australis" row_num="176" row_name="Tribolium castaneum" class="cell matrix_table_td">0.9962171649</th>
<td col_num="177" col_name="Tribolium castaneum" row_num="176" row_name="Tribolium castaneum" class="cell matrix_table_td">1.0000000000</th>
</tr>
<tr row_num="177" row_name="177" class="table_row matrix_table_row">
</tr>
<tr row_num="178" row_name="178" class="table_row matrix_table_row">
</tr>
</table>
</div>
"""
# Script entry point; main() is expected to be defined earlier in this
# module (not visible in this chunk -- confirm).
if __name__ == '__main__':
    main()
|
|
#
# (c) PySimiam Team 2014
#
# Contact person: Tim Fuchs <typograph@elec.ru>
#
# This class was implemented as a weekly programming exercise
# of the 'Control of Mobile Robots' course by Magnus Egerstedt.
#
import numpy as np
from pose import Pose
from sensor import ProximitySensor
from robot import Robot
from math import ceil, exp, sin, cos, tan, pi
from helpers import Struct
class QuickBot_IRSensor(ProximitySensor):
    """IR proximity sensor model used by the QuickBot.

    Converts a geometric distance into the sensor's raw reading via a
    characterization polynomial, saturating outside [rmin, rmax].
    """

    # Polynomial coefficients mapping distance -> raw reading, highest
    # order first (consumed by np.polyval).
    ir_coeff = np.array([
         1.16931064e+07,
        -1.49425626e+07,
         7.96904053e+06,
        -2.28884314e+06,
         3.80068213e+05,
        -3.64435691e+04,
         1.89558821e+03,
    ])

    def __init__(self, pose, robot):
        """Mount the sensor on `robot` at `pose`.

        Geometry (rmin = 0.04, rmax = 0.3, 6 degree beam) copied from SimIAm.
        """
        ProximitySensor.__init__(self, pose, robot, (0.04, 0.3, np.radians(6)))

    def distance_to_value(self, dst):
        """Return the raw sensor reading for a measured distance `dst`."""
        if dst < self.rmin:
            # closer than the minimum range: saturated reading
            return 917
        if dst > self.rmax:
            # beyond the maximum range: floor reading
            return 133
        return np.polyval(self.ir_coeff, dst)
class QuickBot(Robot):
    """Differential-drive QuickBot robot model (Robot subclass).

    Holds the polygon shapes used for drawing (base plate, controller
    board, wheels, IR housings), five IR proximity sensors, and the
    exact closed-form differential-drive motion update.
    """
    def __init__(self, pose, color = 0xFFFFFF):
        """Create the robot at `pose` with body `color` (0xRRGGBB int)."""
        Robot.__init__(self, pose, color)
        # create shapes: each is a polygon of homogeneous (x, y, 1) points
        # in robot-local coordinates (presumably meters, matching the real
        # QuickBot's ~0.2 m footprint -- TODO confirm units).
        self._shapes = Struct()
        # chassis outline
        self._shapes.base_plate = np.array([[ 0.0335, 0.0534, 1],
                                            [ 0.0429, 0.0534, 1],
                                            [ 0.0639, 0.0334, 1],
                                            [ 0.0686, 0.0000, 1],
                                            [ 0.0639,-0.0334, 1],
                                            [ 0.0429,-0.0534, 1],
                                            [ 0.0335,-0.0534, 1],
                                            [-0.0465,-0.0534, 1],
                                            [-0.0815,-0.0534, 1],
                                            [-0.1112,-0.0387, 1],
                                            [-0.1112, 0.0387, 1],
                                            [-0.0815, 0.0534, 1],
                                            [-0.0465, 0.0534, 1]])
        # controller board ("bbb") and its rails/connectors
        self._shapes.bbb = np.array([[-0.0914,-0.0406, 1],
                                     [-0.0944,-0.0376, 1],
                                     [-0.0944, 0.0376, 1],
                                     [-0.0914, 0.0406, 1],
                                     [-0.0429, 0.0406, 1],
                                     [-0.0399, 0.0376, 1],
                                     [-0.0399,-0.0376, 1],
                                     [-0.0429,-0.0406, 1]])
        self._shapes.bbb_rail_l = np.array([[-0.0429, -0.0356,1],
                                            [-0.0429, 0.0233,1],
                                            [-0.0479, 0.0233,1],
                                            [-0.0479,-0.0356,1]])
        self._shapes.bbb_rail_r = np.array([[-0.0914,-0.0356,1],
                                            [-0.0914, 0.0233,1],
                                            [-0.0864, 0.0233,1],
                                            [-0.0864,-0.0356,1]])
        self._shapes.bbb_eth = np.array([[-0.0579, 0.0436, 1],
                                         [-0.0579, 0.0226, 1],
                                         [-0.0739, 0.0226, 1],
                                         [-0.0739, 0.0436, 1]])
        # wheels (filled) and their outlines (drawn over the chassis)
        self._shapes.left_wheel = np.array([[ 0.0254, 0.0595, 1],
                                            [ 0.0254, 0.0335, 1],
                                            [-0.0384, 0.0335, 1],
                                            [-0.0384, 0.0595, 1]])
        self._shapes.left_wheel_ol = np.array([[ 0.0254, 0.0595, 1],
                                               [ 0.0254, 0.0335, 1],
                                               [-0.0384, 0.0335, 1],
                                               [-0.0384, 0.0595, 1]])
        self._shapes.right_wheel_ol = np.array([[ 0.0254,-0.0595, 1],
                                                [ 0.0254,-0.0335, 1],
                                                [-0.0384,-0.0335, 1],
                                                [-0.0384,-0.0595, 1]])
        self._shapes.right_wheel = np.array([[ 0.0254,-0.0595, 1],
                                             [ 0.0254,-0.0335, 1],
                                             [-0.0384,-0.0335, 1],
                                             [-0.0384,-0.0595, 1]])
        # IR sensor housings ir_1..ir_5 (left side around to right side)
        self._shapes.ir_1 = np.array([[-0.0732, 0.0534, 1],
                                      [-0.0732, 0.0634, 1],
                                      [-0.0432, 0.0634, 1],
                                      [-0.0432, 0.0534, 1]])
        self._shapes.ir_2 = np.array([[ 0.0643, 0.0214, 1],
                                      [ 0.0714, 0.0285, 1],
                                      [ 0.0502, 0.0497, 1],
                                      [ 0.0431, 0.0426, 1]])
        self._shapes.ir_3 = np.array([[ 0.0636,-0.0042, 1],
                                      [ 0.0636, 0.0258, 1],
                                      [ 0.0736, 0.0258, 1],
                                      [ 0.0736,-0.0042, 1]])
        self._shapes.ir_4 = np.array([[ 0.0643,-0.0214, 1],
                                      [ 0.0714,-0.0285, 1],
                                      [ 0.0502,-0.0497, 1],
                                      [ 0.0431,-0.0426, 1]])
        self._shapes.ir_5 = np.array([[-0.0732,-0.0534, 1],
                                      [-0.0732,-0.0634, 1],
                                      [-0.0432,-0.0634, 1],
                                      [-0.0432,-0.0534, 1]])
        self._shapes.bbb_usb = np.array([[-0.0824,-0.0418, 1],
                                         [-0.0694,-0.0418, 1],
                                         [-0.0694,-0.0278, 1],
                                         [-0.0824,-0.0278, 1]])
        # create IR sensors at their mount points/headings on the chassis
        self.ir_sensors = []
        ir_sensor_poses = [
            Pose(-0.0474, 0.0534, np.radians(90)),
            Pose( 0.0613, 0.0244, np.radians(45)),
            Pose( 0.0636, 0.0, np.radians(0)),
            Pose( 0.0461,-0.0396, np.radians(-45)),
            Pose(-0.0690,-0.0534, np.radians(-90))
        ]
        for pose in ir_sensor_poses:
            self.ir_sensors.append(QuickBot_IRSensor(pose,self))
        # initialize motion state
        self.ang_velocity = (0.0,0.0)  # (vl, vr) wheel angular speeds
        # `info` mirrors the state exposed to supervisors/controllers
        self.info = Struct()
        self.info.wheels = Struct()
        # these were the original parameters
        self.info.wheels.radius = 0.0325  # wheel radius
        self.info.wheels.base_length = 0.09925  # distance between the wheels
        self.info.wheels.ticks_per_rev = 16.0  # encoder resolution
        self.info.wheels.left_ticks = 0
        self.info.wheels.right_ticks = 0
        self.info.wheels.max_velocity = 2*pi*130/60 # 130 RPM
        self.info.wheels.min_velocity = 2*pi*30/60  # 30 RPM
        # accumulated wheel revolutions; encoder ticks are derived from these
        self.left_revolutions = 0.0
        self.right_revolutions = 0.0
        self.info.ir_sensors = Struct()
        self.info.ir_sensors.poses = ir_sensor_poses
        self.info.ir_sensors.readings = None  # filled by get_info()
        self.info.ir_sensors.rmax = 0.3   # sensor maximum range
        self.info.ir_sensors.rmin = 0.04  # sensor minimum range
    def draw(self,r):
        """Draw the robot body, board and sensor housings using renderer `r`."""
        r.set_pose(self.get_pose())
        r.set_pen(0)
        r.set_brush(0)
        # sensor housings and wheels first (solid fill)
        r.draw_polygon(self._shapes.ir_1)
        r.draw_polygon(self._shapes.ir_2)
        r.draw_polygon(self._shapes.ir_3)
        r.draw_polygon(self._shapes.ir_4)
        r.draw_polygon(self._shapes.ir_5)
        r.draw_polygon(self._shapes.left_wheel)
        r.draw_polygon(self._shapes.right_wheel)
        # chassis in the robot's color
        r.set_pen(0x01000000)
        r.set_brush(self.get_color())
        r.draw_polygon(self._shapes.base_plate)
        # wheel outlines drawn over the chassis
        r.set_pen(0x10000000)
        r.set_brush(None)
        r.draw_polygon(self._shapes.left_wheel)
        r.draw_polygon(self._shapes.right_wheel)
        # controller board and its connectors
        r.set_pen(None)
        r.set_brush(0x333333)
        r.draw_polygon(self._shapes.bbb)
        r.set_brush(0)
        r.draw_polygon(self._shapes.bbb_rail_l)
        r.draw_polygon(self._shapes.bbb_rail_r)
        r.set_brush(0xb2b2b2)
        r.draw_polygon(self._shapes.bbb_eth)
        r.draw_polygon(self._shapes.bbb_usb)
    def get_envelope(self):
        """Return the chassis outline polygon."""
        return self._shapes.base_plate
    def move(self,dt):
        """Advance the pose by `dt` using the exact unicycle solution."""
        # There's no need to use the integrator - these equations have a solution
        (vl, vr) = self.get_wheel_speeds()
        (v,w) = self.diff2uni((vl,vr))
        x, y, theta = self.get_pose()
        if w == 0:
            # straight-line motion
            x += v*cos(theta)*dt
            y += v*sin(theta)*dt
        else:
            # circular arc of radius v/w
            dtheta = w*dt
            x += 2*v/w*cos(theta + dtheta/2)*sin(dtheta/2)
            y += 2*v/w*sin(theta + dtheta/2)*sin(dtheta/2)
            theta += dtheta
        # normalize the heading to [-pi, pi)
        self.set_pose(Pose(x, y, (theta + pi)%(2*pi) - pi))
        # integrate wheel revolutions and derive encoder tick counts
        self.left_revolutions += vl*dt/2/pi
        self.right_revolutions += vr*dt/2/pi
        self.info.wheels.left_ticks = int(self.left_revolutions*self.info.wheels.ticks_per_rev)
        self.info.wheels.right_ticks = int(self.right_revolutions*self.info.wheels.ticks_per_rev)
    def get_info(self):
        """Return the info struct with freshly sampled IR sensor readings."""
        self.info.ir_sensors.readings = [sensor.reading() for sensor in self.ir_sensors]
        return self.info
    def set_inputs(self,inputs):
        """Set wheel speeds from a (vl, vr) pair; see set_wheel_speeds()."""
        self.set_wheel_speeds(inputs)
    def diff2uni(self,diff):
        """Convert differential wheel speeds (vl, vr) to unicycle (v, w)."""
        (vl,vr) = diff
        v = (vl+vr) * self.info.wheels.radius/2;
        w = (vr-vl) * self.info.wheels.radius/self.info.wheels.base_length;
        return (v,w)
    def get_wheel_speeds(self):
        """Return the current (vl, vr) wheel angular speeds."""
        return self.ang_velocity
    def set_wheel_speeds(self,*args):
        """Set wheel speeds from (vl, vr) or from a single iterable argument.

        Speeds are clamped to +/- max_velocity; note that min_velocity is
        stored in `info` but not enforced here.
        """
        if len(args) == 2:
            (vl, vr) = args
        else:
            (vl, vr) = args[0]
        left_ms = max(-self.info.wheels.max_velocity, min(self.info.wheels.max_velocity, vl))
        right_ms = max(-self.info.wheels.max_velocity, min(self.info.wheels.max_velocity, vr))
        self.ang_velocity = (left_ms, right_ms)
    def get_external_sensors(self):
        """Return the list of IR proximity sensors."""
        return self.ir_sensors
    def draw_sensors(self,renderer):
        """Draw the sensors that this robot has"""
        for sensor in self.ir_sensors:
            sensor.draw(renderer)
    def update_sensors(self):
        """Refresh each IR sensor's measured distance."""
        for sensor in self.ir_sensors:
            sensor.update_distance()
if __name__ == "__main__":
    # JP limits
    #v = max(min(v,0.314),-0.3148);
    #w = max(min(w,2.276),-2.2763);
    # Real limits
    # Quick sanity demo: request huge wheel speeds, observe the clamped
    # (v, w) the model actually produces.
    bot = QuickBot(Pose(0, 0, 0))
    bot.set_wheel_speeds(1000, 1000)
    print(bot.diff2uni(bot.get_wheel_speeds()))
    bot.set_wheel_speeds(1000, -1000)
    print(bot.diff2uni(bot.get_wheel_speeds()))
    # 0.341 and 7.7
|
|
#!/usr/local/bin/python
from util import getRandomDist
import numpy as np
import logging
from math import floor, ceil
class TokenBucket(object):
    """One token-bucket element of a pull-based rate-limiter simulation.

    Tokens accrue continuously at `rate` per time unit, capped at
    `bucketSize`.  A job of size S offered via enqueueJob() is accepted
    only when at least S tokens are available; otherwise `nextTime` is
    set to the instant enough tokens will have accumulated.  Elements
    are chained with setInput()/setOutput() and driven by an external
    event loop through getNextTime()/runTime().
    """

    def __init__(self, id=0):
        self.id = id                    # identifier used in log messages
        self.input = []                 # upstream elements, polled round-robin
        self.output = None              # downstream element; None == sink
        self.rate = 1                   # token refill rate (tokens / time unit)
        self.nextTime = float('inf')    # next scheduled event for this element
        self.nextSize = 0.0             # token level restored at nextTime
        self.time = 0.0                 # current simulation time
        self.tokenNum = 0               # tokens currently available
        self.bucketSize = 10            # maximum token capacity
        self.nextIndex = 0              # round-robin cursor into self.input
        self.queue = 0                  # size of the single queued job
        self.lastUpdateTime = 0.0       # last time tokenNum was refreshed
        self.usedToken = 0              # total tokens spent so far
        self.finishTime = []            # completion times of jobs sunk here

    def whoAmI(self):
        logging.debug("[TokenBucket %d]", self.id)

    def setRate(self, rate):
        self.rate = rate

    def setInput(self, input):
        """Attach one upstream element, or extend with a list of them."""
        if isinstance(input, list):
            self.input += input
        else:
            self.input.append(input)

    def setOutput(self, output):
        """Attach the downstream element and register self as its input."""
        self.output = output
        self.output.setInput(self)

    def getNextTime(self):
        """Return (time, element) of the earliest pending event in this subtree."""
        # Robustness fix: a bucket with no inputs used to crash on min([]).
        if not self.input:
            return self.nextTime, self
        nextTime = []
        nextItem = []
        for src in self.input:
            t, item = src.getNextTime()
            nextTime.append(t)
            nextItem.append(item)
        index = nextTime.index(min(nextTime))
        if self.nextTime <= nextTime[index]:
            return self.nextTime, self
        return nextTime[index], nextItem[index]

    def runTime(self, time):
        """Handle the event scheduled at `time` for this element."""
        self.updateTime(time)
        # this element triggered the event: restore the promised token
        # level, then drain as many upstream jobs as the tokens allow
        if self.time == self.nextTime:
            self.tokenNum = self.nextSize
            while self.pullJob(time):
                continue

    def setParameters(self, rate=-1, size=-1, tokenNum=-1):
        """Set rate/capacity/token level; -1 keeps rate and size unchanged.

        NOTE: leaving tokenNum at -1 resets the token level to 0
        (historical behavior, kept for compatibility).
        """
        if rate != -1:
            self.rate = rate
        if size != -1:
            self.bucketSize = size
        if tokenNum != -1:
            self.tokenNum = tokenNum
        else:
            self.tokenNum = 0

    def updateParameters(self, time, rate=-1, size=-1, tokenNum=-1):
        """Advance the clock and reconfigure the bucket in one step."""
        self.updateTime(time)
        self.setParameters(rate, size, tokenNum)
        logging.info("[Bucket %d] update token bucket, %f, %d, %d", self.id, rate, size, tokenNum)

    def updateTime(self, time):
        self.time = time

    def hasJob(self):
        return self.queue > 0

    def hasQueue(self, time, size):
        """Refill tokens up to `time`; True if `size` tokens are available."""
        if time != self.nextTime:
            # lazily accrue tokens since the last refresh, capped at capacity
            self.tokenNum += (time - self.lastUpdateTime) * self.rate
            if self.tokenNum > self.bucketSize:
                self.tokenNum = self.bucketSize
        else:
            # scheduled wake-up: exactly `size` tokens were promised
            self.tokenNum = size
        self.lastUpdateTime = time
        if self.tokenNum >= size:
            self.nextTime = float('inf')
            logging.debug("tokenNum %f, lastupdateTime %f, nextTime %f, time%f", self.tokenNum, self.lastUpdateTime, self.nextTime, time)
            return True
        self.getNextUpdateTime(time, size)
        logging.debug("tokenNum %f, lastupdateTime %f, nextTime %f, time%f", self.tokenNum, self.lastUpdateTime, self.nextTime, time)
        return False

    def getNextUpdateTime(self, time, size):
        # instant at which enough tokens for `size` will have accumulated
        self.nextTime = time + (size - self.tokenNum) / self.rate

    def decreaseQueue(self):
        # the queued job has left: spend its tokens
        self.tokenNum -= self.queue
        self.usedToken += self.queue
        self.queue = 0

    def increaseQueue(self, size):
        self.queue = size

    def dequeueJob(self, time):
        """Move the queued job downstream (or sink it); True on success."""
        if self.output is None:
            # terminal element: record job completion
            self.finishTime.append(time)
        elif not self.output.enqueueJob(time, self.queue):
            # downstream refused the job (not enough tokens there yet)
            return False
        self.decreaseQueue()
        logging.debug("[Bucket %d] dequeue job, %f", self.id, self.tokenNum)
        return True

    def pullJob(self, time):
        """Pull one job from the inputs, round-robin; True if a job moved."""
        logging.debug("[Bucket %d] pull job", self.id)
        n = len(self.input)
        for offset in range(n):
            index = (self.nextIndex + offset) % n
            if not self.input[index].pullJob(time):
                continue
            # BUG FIX: advance the cursor past the input that was actually
            # served.  The original used the loop offset ((i+1) % n), which
            # ignores the cursor base and can starve inputs.
            self.nextIndex = (index + 1) % n
            return self.dequeueJob(time)
        return False

    def enqueueJob(self, time, size):
        """Called by the upstream element: accept a job of `size` if tokens allow."""
        if not self.hasQueue(time, size):
            logging.debug("[Bucket %d] enqueue job fail size %f, nextTime %f, tokenNum %f, lastupdateTime %f", self.id, size, self.nextTime, self.tokenNum, self.lastUpdateTime)
            return False
        logging.debug("[Bucket %d] enqueue job, with token %f", self.id, self.tokenNum)
        self.increaseQueue(size)
        return True

    def pushJob(self, time):
        """Push the queued job onward; thin wrapper kept for API symmetry."""
        return self.dequeueJob(time)

    def getQueueSize(self, time):
        """Total queued size across the upstream subtree."""
        return sum(src.getQueueSize(time) for src in self.input)

    def getRate(self, time):
        """Aggregate input rate across the upstream subtree."""
        return sum(src.getRate(time) for src in self.input)

    def getTokenNum(self):
        return self.tokenNum
|
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import time
from resource_management.core.exceptions import Fail
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Execute
import metron_service
from metron_security import kinit
# Wrap major operations and functionality in this class
# Wrap major operations and functionality in this class
class EnrichmentCommands:
    """Manages the Metron enrichment component: Kafka topics/ACLs, HBase
    tables/ACLs/coprocessor, the MaxMind GeoIP/ASN databases, and the
    Storm topology lifecycle.

    Each one-time setup step persists a "*_configured" marker file on
    disk (checked in __init__) so it is not repeated across restarts.
    """
    __params = None
    __enrichment_topology = None
    __enrichment_topic = None
    __kafka_configured = False
    __kafka_acl_configured = False
    __hbase_configured = False
    __hbase_acl_configured = False
    __maxmind_configured = False

    def __init__(self, params):
        if params is None:
            raise ValueError("params argument is required for initialization")
        self.__params = params
        self.__enrichment_topology = params.metron_enrichment_topology
        self.__enrichment_topic = params.enrichment_input_topic
        # each flag is True when its marker file exists
        self.__kafka_configured = os.path.isfile(self.__params.enrichment_kafka_configured_flag_file)
        self.__kafka_acl_configured = os.path.isfile(self.__params.enrichment_kafka_acl_configured_flag_file)
        self.__hbase_configured = os.path.isfile(self.__params.enrichment_hbase_configured_flag_file)
        self.__hbase_coprocessor_configured = os.path.isfile(self.__params.enrichment_hbase_coprocessor_configured_flag_file)
        self.__hbase_acl_configured = os.path.isfile(self.__params.enrichment_hbase_acl_configured_flag_file)
        self.__maxmind_configured = os.path.isfile(self.__params.enrichment_maxmind_configured_flag_file)

    def __get_topics(self):
        # enrichment input topic plus its error topic
        return [self.__enrichment_topic, self.__params.enrichment_error_topic]

    def __get_kafka_acl_groups(self):
        # the consumer group name matches the input topic
        return [self.__enrichment_topic]

    def is_kafka_configured(self):
        return self.__kafka_configured

    def is_kafka_acl_configured(self):
        return self.__kafka_acl_configured

    def is_hbase_configured(self):
        return self.__hbase_configured

    def is_hbase_coprocessor_configured(self):
        return self.__hbase_coprocessor_configured

    def is_hbase_acl_configured(self):
        return self.__hbase_acl_configured

    def is_maxmind_configured(self):
        return self.__maxmind_configured

    def set_kafka_configured(self):
        metron_service.set_configured(self.__params.metron_user, self.__params.enrichment_kafka_configured_flag_file, "Setting Kafka configured to True for enrichment")

    def set_kafka_acl_configured(self):
        metron_service.set_configured(self.__params.metron_user, self.__params.enrichment_kafka_acl_configured_flag_file, "Setting Kafka ACL configured to True for enrichment")

    def set_hbase_configured(self):
        metron_service.set_configured(self.__params.metron_user, self.__params.enrichment_hbase_configured_flag_file, "Setting HBase configured to True for enrichment")

    def set_hbase_coprocessor_configured(self):
        metron_service.set_configured(self.__params.metron_user, self.__params.enrichment_hbase_coprocessor_configured_flag_file, "Setting HBase coprocessor configured to True for enrichment")

    def set_hbase_acl_configured(self):
        metron_service.set_configured(self.__params.metron_user, self.__params.enrichment_hbase_acl_configured_flag_file, "Setting HBase ACL configured to True for enrichment")

    def set_maxmind_configured(self):
        metron_service.set_configured(self.__params.metron_user, self.__params.enrichment_maxmind_configured_flag_file, "Setting Maxmind databases configured to True for enrichment")

    def init_maxmind(self):
        """Create the HDFS locations for the MaxMind databases and load them."""
        Logger.info("Creating HDFS locations for MaxMind databases")
        # NOTE: octal literals written as 0o755 (was 0755) so the module is
        # valid on Python 2.6+ and Python 3; the value is unchanged.
        self.__params.HdfsResource(self.__params.geoip_hdfs_dir,
                                   type="directory",
                                   action="create_on_execute",
                                   owner=self.__params.metron_user,
                                   group=self.__params.metron_group,
                                   mode=0o755,
                                   )
        self.__params.HdfsResource(self.__params.asn_hdfs_dir,
                                   type="directory",
                                   action="create_on_execute",
                                   owner=self.__params.metron_user,
                                   group=self.__params.metron_group,
                                   mode=0o755,
                                   )
        Logger.info("Creating and loading Maxmind databases")
        command_template = """{0}/bin/maxmind_enrichment_load.sh \
-g {1} \
-a {2} \
-r {3} \
-ra {4} \
-z {5}"""
        command = command_template.format(self.__params.metron_home,
                                          self.__params.geoip_url,
                                          self.__params.asn_url,
                                          self.__params.geoip_hdfs_dir,
                                          self.__params.asn_hdfs_dir,
                                          self.__params.zookeeper_quorum
                                          )
        Logger.info("Executing command " + command)
        Execute(command, user=self.__params.metron_user, tries=1, logoutput=True)
        # typo fix: was "intializing"
        Logger.info("Done initializing Maxmind databases")
        self.set_maxmind_configured()

    def init_kafka_topics(self):
        """Create the Kafka topics used by enrichment."""
        Logger.info('Creating Kafka topics for enrichment')
        # All errors go to indexing topics, so create it here if it's not already
        metron_service.init_kafka_topics(self.__params, self.__get_topics())
        self.set_kafka_configured()

    def init_kafka_acls(self):
        """Grant Kafka topic and consumer-group ACLs for enrichment."""
        Logger.info('Creating Kafka ACls for enrichment')
        metron_service.init_kafka_acls(self.__params, self.__get_topics())
        # Enrichment topic names matches group
        metron_service.init_kafka_acl_groups(self.__params, self.__get_kafka_acl_groups())
        self.set_kafka_acl_configured()

    def start_enrichment_topology(self, env):
        """Start the Storm enrichment topology if it is not already running."""
        Logger.info("Starting Metron enrichment topology: {0}".format(self.__enrichment_topology))
        if not self.is_topology_active(env):
            # which enrichment topology needs started?
            if self.__params.enrichment_topology == "Unified":
                topology_flux = "{0}/flux/enrichment/remote-unified.yaml".format(self.__params.metron_home)
                topology_props = "{0}/config/enrichment-unified.properties".format(self.__params.metron_home)
            elif self.__params.enrichment_topology == "Split-Join":
                topology_flux = "{0}/flux/enrichment/remote-splitjoin.yaml".format(self.__params.metron_home)
                topology_props = "{0}/config/enrichment-splitjoin.properties".format(self.__params.metron_home)
            else:
                raise Fail("Unexpected enrichment topology; name=" + self.__params.enrichment_topology)
            # start the topology
            start_cmd_template = """{0}/bin/start_enrichment_topology.sh --remote {1} --filter {2}"""
            Logger.info('Starting ' + self.__enrichment_topology)
            start_cmd = start_cmd_template.format(self.__params.metron_home, topology_flux, topology_props)
            Execute(start_cmd, user=self.__params.metron_user, tries=3, try_sleep=5, logoutput=True)
        else:
            Logger.info('Enrichment topology already running')
        Logger.info('Finished starting enrichment topology')

    def stop_enrichment_topology(self, env):
        """Kill the Storm enrichment topology if it is running."""
        Logger.info('Stopping ' + self.__enrichment_topology)
        if self.is_topology_active(env):
            stop_cmd = 'storm kill ' + self.__enrichment_topology
            Execute(stop_cmd, user=self.__params.metron_user, tries=3, try_sleep=5, logoutput=True)
        else:
            Logger.info("Enrichment topology already stopped")
        Logger.info('Done stopping enrichment topologies')

    def restart_enrichment_topology(self, env):
        """Stop the topology, wait for Storm to clean it up, then start it again."""
        Logger.info('Restarting the enrichment topologies')
        self.stop_enrichment_topology(env)
        # Wait for old topology to be cleaned up by Storm, before starting again.
        retries = 0
        topology_active = self.is_topology_active(env)
        while topology_active and retries < 3:
            Logger.info('Existing topology still active. Will wait and retry')
            time.sleep(40)
            topology_active = self.is_topology_active(env)
            retries += 1
        if not topology_active:
            self.start_enrichment_topology(env)
            Logger.info('Done restarting the enrichment topology')
        else:
            Logger.warning('Retries exhausted. Existing topology not cleaned up. Aborting topology start.')

    def is_topology_active(self, env):
        """True if the enrichment topology is ACTIVE or REBALANCING in Storm."""
        env.set_params(self.__params)
        active = True
        topologies = metron_service.get_running_topologies(self.__params)
        is_running = False
        if self.__enrichment_topology in topologies:
            is_running = topologies[self.__enrichment_topology] in ['ACTIVE', 'REBALANCING']
        active &= is_running
        return active

    def create_hbase_tables(self):
        """Create the enrichment, enrichment-list and threat-intel HBase tables."""
        Logger.info("Creating HBase Tables")
        metron_service.create_hbase_table(self.__params,
                                          self.__params.enrichment_hbase_table,
                                          self.__params.enrichment_hbase_cf)
        metron_service.create_hbase_table(self.__params,
                                          self.__params.enrichment_list_hbase_table,
                                          self.__params.enrichment_list_hbase_cf)
        metron_service.create_hbase_table(self.__params,
                                          self.__params.threatintel_hbase_table,
                                          self.__params.threatintel_hbase_cf)
        Logger.info("Done creating HBase Tables")
        self.set_hbase_configured()

    def load_enrichment_coprocessor(self):
        """Upload the enrichment coprocessor jar to HDFS and attach it to the table."""
        Logger.info("Creating HDFS location for enrichment coprocessor and loading from local disk")
        self.__params.HdfsResource(self.__params.hbase_coprocessor_hdfs_dir,
                                   type="directory",
                                   action="create_on_execute",
                                   owner=self.__params.metron_user,
                                   group=self.__params.metron_group,
                                   mode=0o755,
                                   source=self.__params.hbase_coprocessor_local_dir,
                                   recursive_chown = True)
        Logger.info("Loading HBase coprocessor for enrichments")
        Logger.info("See https://hbase.apache.org/1.1/book.html#load_coprocessor_in_shell for more detail")
        Logger.info("HBase coprocessor setup - first disabling the enrichments HBase table.")
        command_template = "echo \"disable '{0}'\" | hbase shell -n"
        command = command_template.format(self.__params.enrichment_hbase_table)
        Logger.info("Executing command " + command)
        Execute(command, user=self.__params.metron_user, tries=1, logoutput=True)
        Logger.info("HBase coprocessor setup - altering table and adding coprocessor.")
        command_template = "{0}/bin/load_enrichment_coprocessor.sh {1} {2} {3} {4} {5}"
        command = command_template.format(self.__params.metron_home,
                                          self.__params.enrichment_hbase_table,
                                          self.__params.hdfs_url,
                                          self.__params.hbase_coprocessor_hdfs_dir,
                                          self.__params.enrichment_list_hbase_coprocessor_impl,
                                          self.__params.zookeeper_quorum)
        Logger.info("Executing command " + command)
        Execute(command, user=self.__params.metron_user, tries=1, logoutput=True)
        Logger.info("HBase coprocessor setup - re-enabling enrichments table.")
        # consistency fix: "enable '{0}'" (with space) to match the
        # disable/describe commands above and below (was "enable'{0}'")
        command_template = "echo \"enable '{0}'\" | hbase shell -n"
        command = command_template.format(self.__params.enrichment_hbase_table)
        Logger.info("Executing command " + command)
        Execute(command, user=self.__params.metron_user, tries=1, logoutput=True)
        Logger.info("HBase coprocessor setup - verifying coprocessor was loaded. The coprocessor should be listed in the TABLE_ATTRIBUTES.")
        command_template = "echo \"describe '{0}'\" | hbase shell -n"
        command = command_template.format(self.__params.enrichment_hbase_table)
        Logger.info("Executing command " + command)
        Execute(command, user=self.__params.metron_user, tries=1, logoutput=True)
        Logger.info("Done loading HBase coprocessor for enrichments")
        self.set_hbase_coprocessor_configured()

    def set_hbase_acls(self):
        """Grant the Metron user RW access to the three HBase tables."""
        Logger.info("Setting HBase ACLs")
        if self.__params.security_enabled:
            # authenticate as the HBase service user before running grants
            kinit(self.__params.kinit_path_local,
                  self.__params.hbase_keytab_path,
                  self.__params.hbase_principal_name,
                  execute_user=self.__params.hbase_user)
        cmd = "echo \"grant '{0}', 'RW', '{1}'\" | hbase shell -n"
        add_enrichment_acl_cmd = cmd.format(self.__params.metron_user, self.__params.enrichment_hbase_table)
        Execute(add_enrichment_acl_cmd,
                tries=3,
                try_sleep=5,
                logoutput=False,
                path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
                user=self.__params.hbase_user
                )
        add_enrichment_list_acl_cmd = cmd.format(self.__params.metron_user, self.__params.enrichment_list_hbase_table)
        Execute(add_enrichment_list_acl_cmd,
                tries=3,
                try_sleep=5,
                logoutput=False,
                path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
                user=self.__params.hbase_user
                )
        add_threatintel_acl_cmd = cmd.format(self.__params.metron_user, self.__params.threatintel_hbase_table)
        Execute(add_threatintel_acl_cmd,
                tries=3,
                try_sleep=5,
                logoutput=False,
                path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
                user=self.__params.hbase_user
                )
        Logger.info("Done setting HBase ACLs")
        self.set_hbase_acl_configured()

    def service_check(self, env):
        """
        Performs a service check for Enrichment.
        :param env: Environment
        """
        Logger.info("Checking for Geo database")
        metron_service.check_hdfs_file_exists(self.__params, self.__params.geoip_hdfs_dir + "/GeoLite2-City.tar.gz")
        Logger.info("Checking for ASN database")
        metron_service.check_hdfs_file_exists(self.__params, self.__params.asn_hdfs_dir + "/GeoLite2-ASN.tar.gz")
        Logger.info('Checking Kafka topics for Enrichment')
        metron_service.check_kafka_topics(self.__params, self.__get_topics())
        Logger.info("Checking HBase for Enrichment")
        metron_service.check_hbase_table(
            self.__params,
            self.__params.enrichment_hbase_table)
        metron_service.check_hbase_column_family(
            self.__params,
            self.__params.enrichment_hbase_table,
            self.__params.enrichment_hbase_cf)
        Logger.info("Checking HBase for Enrichment List")
        metron_service.check_hbase_table(
            self.__params,
            self.__params.enrichment_list_hbase_table)
        metron_service.check_hbase_column_family(
            self.__params,
            self.__params.enrichment_list_hbase_table,
            self.__params.enrichment_list_hbase_cf)
        Logger.info("Checking HBase for Threat Intel")
        metron_service.check_hbase_table(
            self.__params,
            self.__params.threatintel_hbase_table)
        metron_service.check_hbase_column_family(
            self.__params,
            self.__params.threatintel_hbase_table,
            self.__params.threatintel_hbase_cf)
        if self.__params.security_enabled:
            Logger.info('Checking Kafka ACLs for Enrichment')
            metron_service.check_kafka_acls(self.__params, self.__get_topics())
            metron_service.check_kafka_acl_groups(self.__params, self.__get_kafka_acl_groups())
            Logger.info("Checking HBase ACLs for Enrichment")
            metron_service.check_hbase_acls(self.__params, self.__params.enrichment_hbase_table)
            Logger.info("Checking HBase ACLs for Enrichment List")
            metron_service.check_hbase_acls(self.__params, self.__params.enrichment_list_hbase_table)
            Logger.info("Checking HBase ACLs for Threat Intel")
            metron_service.check_hbase_acls(self.__params, self.__params.threatintel_hbase_table)
        Logger.info("Checking for Enrichment topology")
        if not self.is_topology_active(env):
            raise Fail("Enrichment topology not running")
        Logger.info("Enrichment service check completed successfully")
|
|
# Copyright 2011 Omniscale (http://omniscale.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from imposm.mapping import (
Options,
Points, LineStrings, Polygons,
String, Bool, Integer, OneOfInt,
set_default_name_type, LocalizedName,
WayZOrder, ZOrder, Direction,
GeneralizedTable, UnionView,
FixInvalidPolygons,
PseudoArea, meter_to_mapunit, sqr_meter_to_mapunit,
)
# # internal configuration options
# # uncomment to make changes to the default values
# import imposm.config
#
# # import relations with missing rings
# imposm.config.import_partial_relations = False
#
# # select relation builder: union or contains
# imposm.config.relation_builder = 'contains'
#
# # log relation that take longer than x seconds
# imposm.config.imposm_multipolygon_report = 60
#
# # skip relations with more rings (0 skip nothing)
# imposm.config.imposm_multipolygon_max_ring = 0
#
# # split ways that are longer than x nodes (0 to split nothing)
# imposm.config.imposm_linestring_max_length = 50
#
# # cache coords in a compact storage (with delta encoding)
# # use this when memory is limited (default)
# imposm.config.imposm_compact_coords_cache = True
# set_default_name_type(LocalizedName(['name:en', 'int_name', 'name']))
# Connection and import settings for the target PostGIS database.
db_conf = Options(
    # db='osm',
    host='localhost',
    port=5432,
    user='osm',
    password='osm',
    sslmode='allow',
    prefix='osm_new_',  # prefix prepended to every imported table name
    proj='epsg:900913',  # spherical-mercator web projection
)
class Highway(LineStrings):
    # Shared base mapping for all road tables: common per-way attributes.
    fields = (
        ('tunnel', Bool()),
        ('bridge', Bool()),
        ('oneway', Direction()),
        ('ref', String()),
        ('z_order', WayZOrder()),  # rendering order derived from way tags
    )
    # 'area' is parsed for filtering only; it is not stored as a column.
    field_filter = (
        ('area', Bool()),
    )
# Populated places; z_order ranks importance (country highest, locality lowest).
places = Points(
    name = 'places',
    mapping = {
        'place': (
            'country',
            'state',
            'region',
            'county',
            'city',
            'town',
            'village',
            'hamlet',
            'suburb',
            'locality',
        ),
    },
    fields = (
        ('z_order', ZOrder([
            'country',
            'state',
            'region',
            'county',
            'city',
            'town',
            'village',
            'hamlet',
            'suburb',
            'locality',
        ])),
        ('population', Integer()),
    ),
)
# Administrative boundary polygons, admin_level 1 (national) through 6.
admin = Polygons(
    name = 'admin',
    mapping = {
        'boundary': (
            'administrative',
        ),
    },
    fields = (
        ('admin_level', OneOfInt('1 2 3 4 5 6'.split())),
    ),
)
# Motorways and trunk roads, including their link/ramp variants.
motorways = Highway(
    name = 'motorways',
    mapping = {
        'highway': (
            'motorway',
            'motorway_link',
            'trunk',
            'trunk_link',
        ),
    }
)
# Primary/secondary/tertiary road network.
mainroads = Highway(
    name = 'mainroads',
    mapping = {
        'highway': (
            'primary',
            'primary_link',
            'secondary',
            'secondary_link',
            'tertiary',
        )}
)
# Building footprints; '__any__' imports every building=* value.
buildings = Polygons(
    name = 'buildings',
    mapping = {
        'building': (
            '__any__',
        )}
)
# Minor roads, paths and residential streets.
minorroads = Highway(
    name = 'minorroads',
    mapping = {
        'highway': (
            'road',
            'path',
            'track',
            'service',
            'footway',
            'bridleway',
            'cycleway',
            'steps',
            'pedestrian',
            'living_street',
            'unclassified',
            'residential',
        )}
)
# Point features of the transport network (junctions, stops, stations, ...).
transport_points = Points(
    name = 'transport_points',
    fields = (
        ('ref', String()),
    ),
    mapping = {
        'highway': (
            'motorway_junction',
            'turning_circle',
            'bus_stop',
        ),
        'railway': (
            'station',
            'halt',
            'tram_stop',
            'crossing',
            'level_crossing',
            'subway_entrance',
        ),
        # Fixed: the OSM tag value is 'aerodrome'; the previous 'aerodome'
        # misspelling matched no features (transport_areas below already
        # uses the correct spelling).
        'aeroway': (
            'aerodrome',
            'terminal',
            'helipad',
            'gate',
        )}
)
# Railway lines of every kind, with bridge/tunnel flags and rendering order.
railways = LineStrings(
    name = 'railways',
    fields = (
        ('tunnel', Bool()),
        ('bridge', Bool()),
        # ('ref', String()),
        ('z_order', WayZOrder()),
    ),
    mapping = {
        'railway': (
            'rail',
            'tram',
            'light_rail',
            'subway',
            'narrow_gauge',
            'preserved',
            'funicular',
            'monorail',
        )}
)
# Linear water features.
waterways = LineStrings(
    name = 'waterways',
    mapping = {
        'waterway': (
            'stream',
            'river',
            'canal',
            'drain',
        )},
    field_filter = (
        ('tunnel', Bool()),
    ),
)
# Water polygons; OSM uses several competing tag schemes for these.
waterareas = Polygons(
    name = 'waterareas',
    mapping = {
        'waterway': ('riverbank',),
        'natural': ('water',),
        'landuse': ('basin', 'reservoir'),
    })
# Airport runways and taxiways as lines.
aeroways = LineStrings(
    name = 'aeroways',
    mapping = {
        'aeroway': (
            'runway',
            'taxiway',
        )}
)
# Station and airport grounds as polygons.
transport_areas = Polygons(
    name = 'transport_areas',
    mapping = {
        'railway': (
            'station',
        ),
        'aeroway': (
            'aerodrome',
            'terminal',
            'helipad',
            'apron',
        ),
    })
# Land-use polygons. z_order ranks overlapping uses for rendering and the
# pseudo-area column supports size-based filtering in the generalized tables.
landusages = Polygons(
    name = 'landusages',
    fields = (
        ('area', PseudoArea()),
        ('z_order', ZOrder([
            'pedestrian',
            'footway',
            'playground',
            'park',
            'forest',
            'cemetery',
            'farmyard',
            'farm',
            'farmland',
            'wood',
            'meadow',
            'grass',
            'village_green',
            'recreation_ground',
            'garden',
            'sports_centre',
            'pitch',
            'common',
            'allotments',
            'golf_course',
            'university',
            'school',
            'college',
            'library',
            'fuel',
            'parking',
            'nature_reserve',
            'cinema',
            'theatre',
            'place_of_worship',
            'hospital',
            'scrub',
            'quarry',
            'residential',
            'retail',
            'commercial',
            'industrial',
            'railway',
            'land',
        ])),
    ),
    mapping = {
        'landuse': (
            'park',
            'forest',
            'residential',
            'retail',
            'commercial',
            'industrial',
            'railway',
            'cemetery',
            'grass',
            'farmyard',
            'farm',
            'farmland',
            'wood',
            'meadow',
            'village_green',
            'recreation_ground',
            'allotments',
            'quarry',
        ),
        'leisure': (
            'park',
            'garden',
            'playground',
            'golf_course',
            'sports_centre',
            'pitch',
            'stadium',
            'common',
            'nature_reserve',
        ),
        'natural': (
            'wood',
            'land',
            'scrub',
        ),
        'highway': (
            'pedestrian',
            'footway',
        ),
        'amenity': (
            'university',
            'school',
            'college',
            'library',
            'fuel',
            'parking',
            'cinema',
            'theatre',
            'place_of_worship',
            'hospital',
        ),
    })
# Selected amenities as points.
amenities = Points(
    name='amenities',
    mapping = {
        'amenity': (
            'university',
            'school',
            'library',
            'fuel',
            'hospital',
            'fire_station',
            'police',
            'townhall',
        ),
    })
# Simplified (generalized) copies of the road/rail tables:
# *_gen1 at 50 m tolerance, *_gen0 at 200 m tolerance (derived from gen1).
motorways_gen1 = GeneralizedTable(
    name = 'motorways_gen1',
    tolerance = meter_to_mapunit(50.0),
    origin = motorways,
)
mainroads_gen1 = GeneralizedTable(
    name = 'mainroads_gen1',
    tolerance = meter_to_mapunit(50.0),
    origin = mainroads,
)
railways_gen1 = GeneralizedTable(
    name = 'railways_gen1',
    tolerance = meter_to_mapunit(50.0),
    origin = railways,
)
motorways_gen0 = GeneralizedTable(
    name = 'motorways_gen0',
    tolerance = meter_to_mapunit(200.0),
    origin = motorways_gen1,
)
mainroads_gen0 = GeneralizedTable(
    name = 'mainroads_gen0',
    tolerance = meter_to_mapunit(200.0),
    origin = mainroads_gen1,
)
railways_gen0 = GeneralizedTable(
    name = 'railways_gen0',
    tolerance = meter_to_mapunit(200.0),
    origin = railways_gen1,
)
# Generalized polygon tables additionally drop features below a minimum area.
landusages_gen0 = GeneralizedTable(
    name = 'landusages_gen0',
    tolerance = meter_to_mapunit(200.0),
    origin = landusages,
    where = "ST_Area(geometry)>%f" % sqr_meter_to_mapunit(500000),
)
landusages_gen1 = GeneralizedTable(
    name = 'landusages_gen1',
    tolerance = meter_to_mapunit(50.0),
    origin = landusages,
    where = "ST_Area(geometry)>%f" % sqr_meter_to_mapunit(50000),
)
waterareas_gen0 = GeneralizedTable(
    name = 'waterareas_gen0',
    tolerance = meter_to_mapunit(200.0),
    origin = waterareas,
    where = "ST_Area(geometry)>%f" % sqr_meter_to_mapunit(500000),
)
waterareas_gen1 = GeneralizedTable(
    name = 'waterareas_gen1',
    tolerance = meter_to_mapunit(50.0),
    origin = waterareas,
    where = "ST_Area(geometry)>%f" % sqr_meter_to_mapunit(50000),
)
# Union views merging the individual road tables into single relations;
# the field defaults (0/None) fill columns missing from a source table.
roads = UnionView(
    name = 'roads',
    fields = (
        ('bridge', 0),
        ('ref', None),
        ('tunnel', 0),
        ('oneway', 0),
        ('z_order', 0),
    ),
    mappings = [motorways, mainroads, minorroads, railways],
)
roads_gen1 = UnionView(
    name = 'roads_gen1',
    fields = (
        ('bridge', 0),
        ('ref', None),
        ('tunnel', 0),
        ('oneway', 0),
        ('z_order', 0),
    ),
    mappings = [railways_gen1, mainroads_gen1, motorways_gen1],
)
roads_gen0 = UnionView(
    name = 'roads_gen0',
    fields = (
        ('bridge', 0),
        ('ref', None),
        ('tunnel', 0),
        ('oneway', 0),
        ('z_order', 0),
    ),
    mappings = [railways_gen0, mainroads_gen0, motorways_gen0],
)
# Repair invalid geometries produced by the generalization step.
landuse_gen1_valid = FixInvalidPolygons(
    origin = landusages_gen1,
)
landuse_gen0_valid = FixInvalidPolygons(
    origin = landusages_gen0,
)
|
|
#!/usr/bin/python
import problems
import solver
import observer
print 'Hello, OptSkills!'
# Experiment-wide defaults. PROBLEM_CODE is reassigned to the active problem
# near the bottom of this script, before the __main__ section runs.
NUM_TESTS = 11
NUM_TASKS = 6
MEAN_TYPE = 'linear'
PROBLEM_CODE = None
def save(prob, model, filename):
    """Serialize the problem and the model's mean description to *filename*.

    All values are stored as repr() strings inside a small JSON object.
    """
    import json
    with open(filename, 'w+') as out:
        payload = {
            'prob': repr(prob),
            'mean_type': repr(model.mean_type),
            'mean_params': repr(model.mean.params()),
        }
        json.dump(payload, out)
def create_problem():
    # PROBLEM_CODE is a module-level string such as 'problems.CEC15(5, "schwefel")'.
    # NOTE(review): eval() of a module global -- safe only because the string is
    # hard-coded in this script; never feed it external input.
    return eval(PROBLEM_CODE)
def create_solver(solver_name, prob):
    """Instantiate the solver identified by *solver_name* for problem *prob*.

    Names of the form 'parameterized|<alg>' select a ParameterizedSolver with
    an explicit algorithm string.  Unknown names yield None.
    """
    print('create_solver: [%s]' % solver_name)
    builders = {
        'parameterized': lambda: solver.ParameterizedSolver(prob, NUM_TASKS, MEAN_TYPE),
        'parameterized_cubic': lambda: solver.ParameterizedSolver(prob, NUM_TASKS, 'cubic'),
        'interpolation': lambda: solver.InterpolationSolver(prob, NUM_TASKS, MEAN_TYPE),
        'direct': lambda: solver.DirectSolver(prob, NUM_TASKS, MEAN_TYPE),
        'direct_cubic': lambda: solver.DirectSolver(prob, NUM_TASKS, 'cubic'),
        'sampler': lambda: solver.Sampler(prob, NUM_TASKS, MEAN_TYPE),
    }
    if solver_name in builders:
        return builders[solver_name]()
    if 'parameterized|' in solver_name:
        alg = solver_name.split('|')[1]
        print('create_solver: alg = %s' % alg)
        return solver.ParameterizedSolver(prob, NUM_TASKS, MEAN_TYPE, alg)
    return None
def evaluate(name, plotting=True, exp_id=None):
    # Run a single solver (selected by *name*) on the module-level problem,
    # recording values via observers; returns (pid, name, recorded_data).
    import os
    # exp_id tags output filenames so repeated runs do not overwrite each other.
    exp = '' if exp_id is None else '_%02d' % exp_id
    obs_plot_values = observer.PlotValues('data_%s%s.csv' % (name, exp))
    observers = [obs_plot_values, observer.PrintTime()]
    observers += [observer.SaveModel('result_%s%s.json' % (name, exp))]
    prob = create_problem()
    s = create_solver(name, prob)
    # if name == 'parameterized':
    #     observers += [observer.PlotMean('linear')]
    for o in observers:
        s.add_observer(o)
    print(s)
    res = s.solve()
    print('==== respond from solver ====')
    print(res)
    if plotting:
        obs_plot_values.plot(PROBLEM_CODE)
    if hasattr(s, 'model'):
        # NOTE(review): this second save ignores exp_id, so parallel runs of the
        # same solver overwrite result_<name>.json -- confirm this is intended.
        save(prob, s.model, 'result_%s.json' % name)
    pid = os.getpid()
    return (pid, name, obs_plot_values.data)
def benchmark(solvers):
    # Run every solver in *solvers* sequentially, merge the recorded data and
    # write/plot a combined benchmark result (data_benchmark.csv).
    # NOTE: Python 2 only -- uses the print statement and dict.iteritems().
    # obs_plot_values = observer.PlotValues()
    import time
    begin_time = time.time()
    print ('-' * 80)
    print('all solvers:')
    print('%s' % solvers)
    print ('-' * 80)
    results = [evaluate(s, False) for s in solvers]
    print ('\n\n')
    print ('-' * 80)
    collected_data = {}
    for i, res in enumerate(results):
        (pid, solver_name, solver_data) = res
        print i, pid, solver_data
        # Merge solver data into one structure
        for name, exp_list in solver_data.iteritems():
            if name not in collected_data:
                collected_data[name] = []
            collected_data[name] += exp_list
    print('-' * 80)
    print('collected data: %s' % collected_data)
    print ('-' * 80)
    print ('plot...')
    pl = observer.PlotValues()
    pl.data = collected_data
    pl.save('data_benchmark.csv')
    pl.plot(PROBLEM_CODE)
    print ('plot... done')
    end_time = time.time()
    print ('total %.4fs elapsed' % (end_time - begin_time))
def mpi_evaluate(solver_name):
    """Worker entry point for mpi_benchmark.

    Solves a hard-coded MirroredSphere problem (PROBLEM_CODE is not used here)
    with the named solver and returns (pid, solver_name, recorded_data).
    """
    import os
    worker_pid = os.getpid()
    print('==== begin solver: %d ====' % worker_pid)
    value_recorder = observer.PlotValues()
    prob = problems.MirroredSphere()
    s = create_solver(solver_name, prob)
    for obs in (value_recorder, observer.PrintTime()):
        s.add_observer(obs)
    result = s.solve()
    print('==== respond from solver %d ====' % worker_pid)
    print(result)
    return (worker_pid, solver_name, value_recorder.data)
def mpi_benchmark(solvers, NUM_CORES=4):
    # Parallel variant of benchmark(): runs the solvers in a multiprocessing
    # pool (despite the 'mpi_' name no MPI is involved). Unlike benchmark(),
    # the merged data is plotted but NOT saved to data_benchmark.csv.
    # NOTE: Python 2 only -- uses the print statement and dict.iteritems().
    # obs_plot_values = observer.PlotValues()
    import multiprocessing as mp
    import time
    begin_time = time.time()
    print ('-' * 80)
    print('all solvers:')
    print('%s' % solvers)
    print ('-' * 80)
    pool = mp.Pool(NUM_CORES)
    results = pool.map(mpi_evaluate, solvers)
    print ('\n\n')
    print ('-' * 80)
    collected_data = {}
    for i, res in enumerate(results):
        (pid, solver_name, solver_data) = res
        print i, pid, solver_data
        # Merge solver data into one structure
        for name, exp_list in solver_data.iteritems():
            if name not in collected_data:
                collected_data[name] = []
            collected_data[name] += exp_list
    print('-' * 80)
    print('collected data: %s' % collected_data)
    print ('-' * 80)
    print ('plot...')
    pl = observer.PlotValues()
    pl.data = collected_data
    pl.plot(PROBLEM_CODE)
    print ('plot... done')
    end_time = time.time()
    print ('total %.4fs elapsed' % (end_time - begin_time))
def plot(filename):
    """Load a previously saved PlotValues CSV file and re-plot it."""
    print('plot [%s]' % filename)
    values = observer.PlotValues()
    values.load(filename)
    values.plot(PROBLEM_CODE, filename)
def copy_and_replot(expname):
    """Archive the latest benchmark CSV/plot under *expname* and re-plot it."""
    import shutil
    csv_copy = '%s.csv' % expname
    png_copy = '%s.png' % expname
    shutil.copy('data_benchmark.csv', csv_copy)
    print ('copy data into %s' % csv_copy)
    plot(csv_copy)
    print ('plot data from %s' % csv_copy)
    shutil.copy('plot_values.png', png_copy)
    print ('copy plot into %s' % png_copy)
    print ('done!')
def merge(output, keywords):
    """Concatenate every file matching each glob pattern in *keywords* into *output*."""
    import glob
    print('outputfile = %s' % output)
    with open(output, 'w+') as sink:
        matched = (name for key in keywords for name in glob.glob(key))
        for name in matched:
            print('merge %s' % name)
            with open(name) as source:
                sink.write(source.read())
# Problem selection: exactly one PROBLEM_CODE assignment is active; the rest
# document previously used experiment configurations.
# PROBLEM_CODE = 'problems.Sphere(20)'
# PROBLEM_CODE = 'problems.Sphere(_seg_type="cubic")'
# PROBLEM_CODE = 'problems.MirroredSphere()'
# PROBLEM_CODE = 'problems.GPBow()'
# PROBLEM_CODE = 'problems.GPStep()'
# PROBLEM_CODE = 'problems.GPKick()'
# PROBLEM_CODE = 'problems.GPWalk()'
# PROBLEM_CODE = 'problems.GPJump()'
# PROBLEM_CODE = 'problems.SimJump()'
# PROBLEM_CODE = 'problems.Sphere(10)'
# PROBLEM_CODE = 'problems.CEC15(20, "bent_cigar")'
# PROBLEM_CODE = 'problems.CEC15(20, "weierstrass")'
PROBLEM_CODE = 'problems.CEC15(5, "schwefel")'
# PROBLEM_CODE = 'problems.CEC15(10, "hgbat")'
# seg = "[[-0.5, -0.1], [0.0, 0.1], [0.5, -0.1]]"
# adjust = "[0.5, 1.0]"
# PROBLEM_CODE = 'problems.CEC15(2, "bent_cigar", %s, "quadratic", 0.5, %s)' \
#     % (seg, adjust)
# PROBLEM_CODE = 'problems.CEC15(2, "weierstrass")'
# seg = "[[-0.5, -0.1], [0.0, 0.1], [0.5, -0.1]]"
# adjust = "[0.5, 1.5]"
# PROBLEM_CODE = 'problems.CEC15(2, "weierstrass", %s, "quad", 0.01, %s)' \
#     % (seg, adjust)
# seg = "[[-0.5, -0.1], [-0.4, 0.1]]"
# # PROBLEM_CODE = 'problems.CEC15(2, "weierstrass", %s, "linear", 1.0)' % seg
# PROBLEM_CODE = 'problems.CEC15(2, "schwefel", %s, "linear", 1.0)' % seg
# PROBLEM_CODE = 'problems.CEC15(2, "schwefel")'
# MEAN_TYPE = 'cubic'
# Benchmark suite of (problem-code, short-name) pairs used by the loop below.
math_problems = []
math_problems += [('problems.Sphere()', 'sphere')]
math_problems += [('problems.MirroredSphere()', 'mirrored')]
seg = "[[-0.5, -0.1], [-0.4, 0.1]]"
math_problems += [('problems.CEC15(2, "weierstrass", %s, "linear", 1.0)' % seg,
                   'weierstrass')]
math_problems += [('problems.CEC15(2, "schwefel", %s, "linear", 1.0)' % seg,
                   'schwefel')]
if __name__ == '__main__':
    import sys
    # Command-line dispatch: the first argument selects the experiment; most
    # commands accept an optional numeric second argument (exp_id or count).
    if len(sys.argv) > 1:
        cmd = sys.argv[1]
        if cmd == 'parameterized':
            if len(sys.argv) == 2:
                evaluate('parameterized')
            else:
                exp_id = int(sys.argv[2])
                print('Experiment Id = %d' % exp_id)
                evaluate('parameterized', exp_id=exp_id)
        elif cmd == 'parameterized2':
            if len(sys.argv) == 2:
                evaluate('parameterized_cubic')
            else:
                exp_id = int(sys.argv[2])
                print('Experiment Id = %d' % exp_id)
                evaluate('parameterized_cubic', exp_id=exp_id)
        elif cmd == 'direct':
            if len(sys.argv) == 2:
                evaluate('direct')
            else:
                exp_id = int(sys.argv[2])
                print('Experiment Id = %d' % exp_id)
                evaluate('direct', exp_id=exp_id)
        elif cmd == 'direct2':
            if len(sys.argv) == 2:
                evaluate('direct_cubic')
            else:
                exp_id = int(sys.argv[2])
                print('Experiment Id = %d' % exp_id)
                evaluate('direct_cubic', exp_id=exp_id)
        elif cmd == 'interpolation':
            evaluate('interpolation')
        elif cmd == 'benchmark':
            times = 11 if len(sys.argv) == 2 else int(sys.argv[2])
            print('Command = %s Times = %d' % (cmd, times))
            benchmark(['parameterized', 'parameterized|cov_rank_1', 'direct']
                      * times)
        elif cmd == 'plot':
            filename = sys.argv[2]
            plot(filename)
        elif cmd == 'sampling':
            evaluate('sampler', False)
        elif cmd == 'merge':
            merge(sys.argv[2], sys.argv[3:])
        elif cmd == 'mergeplot':
            merge(sys.argv[2], sys.argv[3:])
            plot(sys.argv[2])
        else:
            print('Unknown command: %s' % cmd)
        exit(0)
    # evaluate('parameterized')
    # evaluate('parameterized_cubic')
    # evaluate('direct')
    # evaluate('interpolation')
    # evaluate('sampler', False)
    # evaluate('parameterized|mean_best,step_1_5,cov_rank_1')
    # evaluate('parameterized|mean_all,step_1_5,cov_all')
    # evaluate('parameterized|mean_rand,step_success,cov_rank_1')
    # mpi_benchmark(['parameterized'] * 11)
    # mpi_benchmark(['parameterized', 'direct'] * 21)
    # benchmark(['parameterized', 'direct'] * 5)
    # mpi_benchmark(['parameterized', 'interpolation'] * 5)
    # mpi_benchmark(['parameterized', 'direct', 'interpolation'] * 3, 1)
    # benchmark(['parameterized'] * 11)
    # benchmark(['parameterized', 'direct'] * 11)
    # benchmark(['parameterized', 'parameterized_cubic'] * 11)
    # benchmark(['parameterized|step_1_5',
    #            'parameterized|step_success',
    #            'direct'] * 21)
    # copy_and_replot('stepalgs02_weierstrass')
    print('sleep 1 seconds..')
    import time
    time.sleep(1)
    # # A full benchmarks for covariance matrix algorithms
    # for i in range(len(math_problems)):
    #     PROBLEM_CODE, shortname = math_problems[i]
    #     print('start %s' % shortname)
    #     # benchmark(['direct', 'parameterized|cov_rank_1'])
    #     benchmark(['parameterized|cov_rank_1',
    #                'parameterized|cov_all',
    #                'direct'] * 21)
    #     print('done with %s' % shortname)
    #     print('sleep 1 seconds..')
    #     time.sleep(1)
    #     copy_and_replot('covariance_prob%02d_%s' % (i, shortname))
    #     print('sleep 1 seconds..')
    #     time.sleep(1)
    # A full benchmarks for all algorithms
    for i in range(len(math_problems)):
        # Only problem index 3 (schwefel) is currently enabled.
        if i != 3:
            continue
        PROBLEM_CODE, shortname = math_problems[i]
        print('start %s' % shortname)
        benchmark(['parameterized|draw_uniform,cov_rank1',
                   'parameterized|draw_uniform,cov_all',
                   'direct'] * 31)
        print('done with %s' % shortname)
        print('sleep 1 seconds..')
        time.sleep(1)
        copy_and_replot('cov_uniform_prob%02d_%s' % (i, shortname))
        print('sleep 1 seconds..')
        time.sleep(1)
|
|
'''
Convenience forms for adding and updating ``Event`` and ``Occurrence``s.
'''
import logging
from datetime import datetime, date, time, timedelta
from dateutil import rrule
from pprint import pformat
from django import forms
from django.core.validators import MinValueValidator, MaxValueValidator
from django.utils.translation import ugettext_lazy as _
from django.forms.extras.widgets import SelectDateWidget
from calendartools.constants import (
WEEKDAY_SHORT, WEEKDAY_LONG,
MONTH_SHORT, ORDINAL,
FREQUENCY_CHOICES, REPEAT_CHOICES, ISO_WEEKDAYS_MAP
)
from calendartools.fields import MultipleIntegerField
from calendartools.defaults import (
MINUTES_INTERVAL, SECONDS_INTERVAL, default_timeslot_offset_options,
MAX_OCCURRENCE_CREATION_COUNT, CALENDAR_APP_LABEL
)
from timezones.forms import TimeZoneField
log = logging.getLogger('calendartools.forms')
# Form Validation Helpers:
# greater_than_1 enforces a minimum of 1; less_than_max caps the number of
# occurrences that may be created in a single request.
greater_than_1 = MinValueValidator(1)
less_than_max = MaxValueValidator(MAX_OCCURRENCE_CREATION_COUNT - 1)
# NOTE(review): django.db.models.loading is a deprecated import location in
# later Django versions; kept as-is for this codebase's Django version.
from django.db.models.loading import get_model
class AttendanceForm(forms.ModelForm):
    """Minimal form for creating/cancelling an Attendance record."""
    # Necessary to hide all the other fields:
    noop = forms.CharField(required=False, widget=forms.widgets.HiddenInput())

    class Meta(object):
        model = get_model(CALENDAR_APP_LABEL, 'Attendance')
        fields = ['noop']

    def clean(self):
        # Submitting the form for an existing booked attendance cancels it.
        instance = self.instance
        currently_booked = instance.pk and instance.status == instance.STATUS.booked
        if currently_booked:
            instance.status = instance.STATUS.cancelled
        return self.cleaned_data
class EventForm(forms.ModelForm):
    '''
    A simple form for adding and updating Event attributes
    '''
    class Meta(object):
        model = get_model(CALENDAR_APP_LABEL, 'Event')
        # Only these two attributes are editable through this form.
        fields = ('name', 'description',)
class OccurrenceBaseForm(forms.Form):
    """Shared validate/save behaviour for forms that build Occurrence objects.

    Subclasses must populate ``self.occurrences`` (e.g. in ``_post_clean``)
    before ``validate_occurrences`` or ``save`` is called.
    """
    def validate_occurrences(self):
        # Partition self.occurrences into model-valid and invalid instances;
        # invalid ones are collected as (occurrence, first-error-message) pairs
        # rather than raised.
        if not self.is_valid():
            raise ValueError
        self.valid_occurrences = []
        self.invalid_occurrences = []
        # if not hasattr(self, 'invalid_occurrences'):
        #     self.invalid_occurrences = []
        for oc in self.occurrences:
            try:
                oc.full_clean()
                self.valid_occurrences.append(oc)
            except forms.ValidationError, e:  # Python 2 except syntax
                errmsg = e.messages[0]
                self.invalid_occurrences.append((oc, errmsg))
        return (self.valid_occurrences, self.invalid_occurrences)
    def save(self):
        # Persist every occurrence (valid or not -- callers are expected to
        # have filtered via validate_occurrences first).
        for occurrence in self.occurrences:
            occurrence.save()
        return self.occurrences
class MultipleOccurrenceForm(OccurrenceBaseForm):
    """
    day
    start_time_delta
    end_time_delta
    # recurrence options
    repeats
    count
    until
    freq
    interval
    # weekly options
    week_days
    # monthly options
    month_option
    month_ordinal
    month_ordinal_day
    each_month_day
    # yearly options
    year_months
    is_year_month_ordinal
    year_month_ordinal
    year_month_ordinal_day
    """
    calendar = forms.ModelChoiceField(get_model(CALENDAR_APP_LABEL, 'Calendar').objects.visible())
    day = forms.DateField(
        label=_(u'Date'),
        initial=date.today,
        widget=SelectDateWidget()
    )
    start_time_delta = forms.IntegerField(
        label=_(u'Start time'),
        widget=forms.Select(choices=default_timeslot_offset_options)
    )
    end_time_delta = forms.IntegerField(
        label=_(u'End time'),
        widget=forms.Select(choices=default_timeslot_offset_options)
    )
    # recurrence options
    repeats = forms.ChoiceField(
        choices=REPEAT_CHOICES,
        initial='count',
        label=_(u'Occurrences'),
        widget=forms.RadioSelect()
    )
    count = forms.IntegerField(
        label=_(u'Total Occurrences'),
        initial=1,
        required=False,
        widget=forms.TextInput(attrs=dict(size=2, max_length=2)),
        validators=[greater_than_1, less_than_max],
    )
    until = forms.DateField(
        required=False,
        initial=date.today,
        widget=SelectDateWidget(),
    )
    freq = forms.IntegerField(
        label=_(u'Frequency'),
        initial=rrule.WEEKLY,
        widget=forms.RadioSelect(choices=FREQUENCY_CHOICES),
    )
    interval = forms.IntegerField(
        initial='1',
        widget=forms.TextInput(attrs=dict(size=3, max_length=3)),
        validators=[greater_than_1, less_than_max],
    )
    # weekly options
    week_days = MultipleIntegerField(
        WEEKDAY_SHORT,
        label=_(u'Weekly options'),
        widget=forms.CheckboxSelectMultiple
    )
    # monthly options
    month_option = forms.ChoiceField(
        choices=(('on',_(u'On the')), ('each',_(u'Each:'))),
        initial='each',
        required=False,
        widget=forms.RadioSelect(),
        label=_(u'Monthly options')
    )
    month_ordinal = forms.IntegerField(
        required=False,
        widget=forms.Select(choices=ORDINAL)
    )
    month_ordinal_day = forms.IntegerField(
        required=False,
        widget=forms.Select(choices=WEEKDAY_LONG)
    )
    each_month_day = MultipleIntegerField(
        [(i,i) for i in range(1,32)],
        widget=forms.CheckboxSelectMultiple
    )
    # yearly options
    year_months = MultipleIntegerField(
        MONTH_SHORT,
        label=_(u'Yearly options'),
        widget=forms.CheckboxSelectMultiple
    )
    is_year_month_ordinal = forms.BooleanField(required=False)
    year_month_ordinal = forms.IntegerField(
        required=False,
        widget=forms.Select(choices=ORDINAL))
    year_month_ordinal_day = forms.IntegerField(
        required=False,
        widget=forms.Select(choices=WEEKDAY_LONG)
    )
    def __init__(self, event, *args, **kws):
        """Bind the form to *event*; seed sensible initial recurrence values
        from an optional 'dtstart' entry in ``initial``."""
        self.event = event
        self.request = kws.pop('request', None)
        self.user = getattr(self.request, 'user', None)
        super(MultipleOccurrenceForm, self).__init__(*args, **kws)
        # Restrict the calendar choices to those visible to the current user.
        self.fields['calendar'].choices = get_model(CALENDAR_APP_LABEL, 'Calendar'
            ).objects.visible(self.user).values_list('id', 'name')
        dtstart = self.initial.get('dtstart', None)
        if dtstart:
            # Snap the start time down to the timeslot grid.
            dtstart = dtstart.replace(
                minute=((dtstart.minute // MINUTES_INTERVAL) * MINUTES_INTERVAL),
                second=0,
                microsecond=0
            )
            weekday = dtstart.isoweekday()
            # Ordinal week-of-month: 1st..4th, or -1 ("last") past week four.
            ordinal = dtstart.day // 7
            ordinal = u'%d' % (-1 if ordinal > 3 else ordinal + 1,)
            self.initial.setdefault('week_days', u'%d' % weekday)
            self.initial.setdefault('month_ordinal', ordinal)
            self.initial.setdefault('month_ordinal_day', u'%d' % weekday)
            self.initial.setdefault('each_month_day', [u'%d' % dtstart.day])
            self.initial.setdefault('year_months', [u'%d' % dtstart.month])
            self.initial.setdefault('year_month_ordinal', ordinal)
            self.initial.setdefault('year_month_ordinal_day', u'%d' % weekday)
            offset = (dtstart - datetime.combine(dtstart.date(), time(0))).seconds
            self.initial.setdefault('start_time_delta', u'%d' % offset)
            self.initial.setdefault('end_time_delta', u'%d' % (
                offset + SECONDS_INTERVAL,)
            )
    def add_field_error(self, fieldname, errmsg):
        """Attach *errmsg* to *fieldname* and discard its cleaned value."""
        self.cleaned_data.pop(fieldname, None)
        self._errors.setdefault(fieldname, self.error_class([errmsg]))
    def check_for_required_fields(self):
        """Many fields on this form depend on the values of other fields to
        determine whether they are required or not. This method handles those
        checks as part of the cleaning process."""
        if (not self.cleaned_data.get('repeats') or
            self.cleaned_data.get('freq') is None):
            # required fields not provided, let default validators handle:
            return
        required_errmsg = forms.Field.default_error_messages['required']
        if (self.cleaned_data['repeats'] == 'count' and
            not self.cleaned_data.get('count')):
            self.add_field_error('count', required_errmsg)
        if (self.cleaned_data['repeats'] == 'until' and
            not self.cleaned_data.get('until')):
            self.add_field_error('until', required_errmsg)
        if (self.cleaned_data['freq'] == rrule.WEEKLY and
            not self.cleaned_data.get('week_days')):
            self.add_field_error('week_days', required_errmsg)
        if (self.cleaned_data['freq'] == rrule.MONTHLY and
            not self.cleaned_data.get('month_option')):
            # Fixed: the error was previously attached to 'month_options',
            # a field name that does not exist on this form.
            self.add_field_error('month_option', required_errmsg)
        if (self.cleaned_data['freq'] == rrule.MONTHLY and
            self.cleaned_data.get('month_option') == 'on' and
            not self.cleaned_data.get('month_ordinal')):
            self.add_field_error('month_ordinal', required_errmsg)
        if (self.cleaned_data['freq'] == rrule.MONTHLY and
            self.cleaned_data.get('month_option') == 'on' and
            not self.cleaned_data.get('month_ordinal_day')):
            self.add_field_error('month_ordinal_day', required_errmsg)
        if (self.cleaned_data['freq'] == rrule.MONTHLY and
            self.cleaned_data.get('month_option') == 'each' and
            not self.cleaned_data.get('each_month_day')):
            self.add_field_error('each_month_day', required_errmsg)
        if (self.cleaned_data['freq'] == rrule.YEARLY and
            self.cleaned_data.get('is_year_month_ordinal') is None):
            self.add_field_error('is_year_month_ordinal', required_errmsg)
        if (self.cleaned_data['freq'] == rrule.YEARLY and
            self.cleaned_data.get('is_year_month_ordinal') and
            not self.cleaned_data.get('year_month_ordinal')):
            self.add_field_error('year_month_ordinal', required_errmsg)
        if (self.cleaned_data['freq'] == rrule.YEARLY and
            self.cleaned_data.get('is_year_month_ordinal') and
            not self.cleaned_data.get('year_month_ordinal_day')):
            self.add_field_error('year_month_ordinal_day', required_errmsg)
        if (self.cleaned_data['freq'] == rrule.YEARLY and
            self.cleaned_data.get('is_year_month_ordinal') is False and
            not self.cleaned_data.get('year_months')):
            self.add_field_error('year_months', required_errmsg)
    def check_until_later_than_finish_datetime(self):
        """For 'until'-style recurrences, the until date must be after the
        first occurrence's end date."""
        repeats = self.cleaned_data.get('repeats')
        until = self.cleaned_data.get('until')
        if (until and repeats and repeats == 'until'
            and until <= self.cleaned_data['end_time'].date()):
            raise forms.ValidationError(_("'Until' date must occur in the future."))
    def clean(self):
        """Derive concrete 'start_time'/'end_time' datetimes from the chosen
        day plus the timeslot offsets."""
        self.check_for_required_fields()
        # Guard: if any of these fields failed field validation they are
        # absent from cleaned_data; bail out so the field errors are reported
        # instead of raising KeyError here.
        if ('day' not in self.cleaned_data or
            'start_time_delta' not in self.cleaned_data or
            'end_time_delta' not in self.cleaned_data):
            return self.cleaned_data
        day = datetime.combine(self.cleaned_data['day'], time(0))
        self.cleaned_data['start_time'] = day + timedelta(
            seconds=self.cleaned_data['start_time_delta']
        )
        self.cleaned_data['end_time'] = day + timedelta(
            seconds=self.cleaned_data['end_time_delta']
        )
        self.check_until_later_than_finish_datetime()
        log.debug("Recurrence-form, Cleaned Data\n%s" % (
            pformat(self.cleaned_data))
        )
        return self.cleaned_data
    def _post_clean(self):
        """Build the (uncommitted) occurrences from the cleaned recurrence
        rule and pre-validate them."""
        if self._errors:
            return
        if self.cleaned_data['repeats'] == 'no':
            self.rrules = {}
        else:
            self.rrules = self._build_rrule_params()
        self.occurrences = self.event.add_occurrences(
            self.cleaned_data['calendar'],
            self.cleaned_data['start_time'],
            self.cleaned_data['end_time'],
            commit=False,
            **self.rrules
        )
        self.validate_occurrences()
        return self.occurrences
    # validate_occurrences() and save() are inherited unchanged from
    # OccurrenceBaseForm; the byte-identical copies that used to be
    # redefined here have been removed.
    def _build_rrule_params(self):
        """Translate the cleaned form data into dateutil.rrule keyword
        arguments."""
        iso = ISO_WEEKDAYS_MAP
        data = self.cleaned_data
        params = dict(
            freq=data['freq'],
            interval=data['interval'] or 1
        )
        if self.cleaned_data['repeats'] == 'count':
            params['count'] = data['count']
        elif self.cleaned_data['repeats'] == 'until':
            params['until'] = data['until']
        if params['freq'] == rrule.WEEKLY:
            params['byweekday'] = [iso[n] for n in data['week_days']]
        elif params['freq'] == rrule.MONTHLY:
            if 'on' == data['month_option']:
                ordinal = data['month_ordinal']
                day = iso[data['month_ordinal_day']]
                params['byweekday'] = day(ordinal)
            else:
                params['bymonthday'] = data['each_month_day']
        elif params['freq'] == rrule.YEARLY:
            params['bymonth'] = data['year_months']
            if data['is_year_month_ordinal']:
                ordinal = data['year_month_ordinal']
                day = iso[data['year_month_ordinal_day']]
                params['byweekday'] = day(ordinal)
        elif params['freq'] != rrule.DAILY:
            raise NotImplementedError(
                _(u'Unknown interval rule %s') % params['freq']
            )
        return params
class ConfirmOccurrenceForm(OccurrenceBaseForm):
    # Second step of occurrence creation: holds the occurrences that already
    # passed validation plus the (occurrence, errmsg) pairs that failed, and
    # re-validates the survivors on submit.
    def __init__(self, event, valid_occurrences, invalid_occurrences, *args, **kws):
        self.event = event
        self.occurrences = valid_occurrences
        self.invalid_occurrences = invalid_occurrences
        super(ConfirmOccurrenceForm, self).__init__(*args, **kws)
    def _post_clean(self):
        # Runs after field cleaning; skip re-validation when field errors exist.
        if self._errors:
            return
        self.validate_occurrences()
        return self.occurrences
class TimeZoneForm(forms.Form):
    """Single-field form used to pick the active display timezone."""
    timezone = TimeZoneField()
|
|
# -*- coding: utf-8 -*-
from .Qt import QtCore, QtGui
from .Vector import Vector
from .SRTTransform import SRTTransform
import pyqtgraph as pg
import numpy as np
import scipy.linalg
class SRTTransform3D(pg.Transform3D):
"""4x4 Transform matrix that can always be represented as a combination of 3 matrices: scale * rotate * translate
This transform has no shear; angles are always preserved.
"""
    def __init__(self, init=None):
        """Initialize from another transform or a saved state.

        *init* may be None (identity), a state dict (see restoreState), an
        SRTTransform3D, an SRTTransform (2D; z-scale forced to 1), a
        QTransform (converted via SRTTransform first), or a QMatrix4x4.
        """
        pg.Transform3D.__init__(self)
        self.reset()
        if init is None:
            return
        # Exact-class check (not isinstance) so a plain QTransform is first
        # converted and then handled by the SRTTransform branch below.
        if init.__class__ is QtGui.QTransform:
            init = SRTTransform(init)
        if isinstance(init, dict):
            self.restoreState(init)
        elif isinstance(init, SRTTransform3D):
            # Copy the source state into fresh Vectors so the two transforms
            # stay independent.
            self._state = {
                'pos': Vector(init._state['pos']),
                'scale': Vector(init._state['scale']),
                'angle': init._state['angle'],
                'axis': Vector(init._state['axis']),
            }
            self.update()
        elif isinstance(init, SRTTransform):
            # 2D source: rotation is about z; force unit z-scale.
            self._state = {
                'pos': Vector(init._state['pos']),
                'scale': Vector(init._state['scale']),
                'angle': init._state['angle'],
                'axis': Vector(0, 0, 1),
            }
            self._state['scale'][2] = 1.0
            self.update()
        elif isinstance(init, QtGui.QMatrix4x4):
            self.setFromMatrix(init)
        else:
            raise Exception("Cannot build SRTTransform3D from argument type:", type(init))
    def getScale(self):
        """Return the scale component of this transform as a pg.Vector."""
        return pg.Vector(self._state['scale'])
    def getRotation(self):
        """Return (angle, axis) of rotation"""
        # angle is in degrees (see reset()); axis is returned as a pg.Vector.
        return self._state['angle'], pg.Vector(self._state['axis'])
    def getTranslation(self):
        """Return the translation component of this transform as a pg.Vector."""
        return pg.Vector(self._state['pos'])
def reset(self):
self._state = {
'pos': Vector(0,0,0),
'scale': Vector(1,1,1),
'angle': 0.0, ## in degrees
'axis': (0, 0, 1)
}
self.update()
def translate(self, *args):
"""Adjust the translation of this transform"""
t = Vector(*args)
self.setTranslate(self._state['pos']+t)
    def setTranslate(self, *args):
        """Set the translation of this transform"""
        # Accepts the same argument forms as Vector (components or a sequence).
        self._state['pos'] = Vector(*args)
        self.update()
def scale(self, *args):
"""adjust the scale of this transform"""
## try to prevent accidentally setting 0 scale on z axis
if len(args) == 1 and hasattr(args[0], '__len__'):
args = args[0]
if len(args) == 2:
args = args + (1,)
s = Vector(*args)
self.setScale(self._state['scale'] * s)
def setScale(self, *args):
"""Set the scale of this transform"""
if len(args) == 1 and hasattr(args[0], '__len__'):
args = args[0]
if len(args) == 2:
args = args + (1,)
self._state['scale'] = Vector(*args)
self.update()
    def rotate(self, angle, axis=(0,0,1)):
        """Adjust the rotation of this transform"""
        origAxis = self._state['axis']
        # Fast path: rotating about the current axis just adds the angles.
        if axis[0] == origAxis[0] and axis[1] == origAxis[1] and axis[2] == origAxis[2]:
            self.setRotate(self._state['angle'] + angle)
        else:
            # Different axis: compose the full matrix in SRT order
            # (translate, existing rotation, new rotation, scale) and
            # re-decompose it back into SRT state.
            m = QtGui.QMatrix4x4()
            m.translate(*self._state['pos'])
            m.rotate(self._state['angle'], *self._state['axis'])
            m.rotate(angle, *axis)
            m.scale(*self._state['scale'])
            self.setFromMatrix(m)
def setRotate(self, angle, axis=(0,0,1)):
"""Set the transformation rotation to angle (in degrees)"""
self._state['angle'] = angle
self._state['axis'] = Vector(axis)
self.update()
def setFromMatrix(self, m):
    """
    Set this transform based on the elements of *m*
    The input matrix must be affine AND have no shear,
    otherwise the conversion will most likely fail.
    """
    # Copy the raw matrix into this transform, then decompose it back
    # into pos / scale / angle / axis state.
    for i in range(4):
        self.setRow(i, m.row(i))
    m = self.matrix().reshape(4,4)
    ## translation is 4th column
    self._state['pos'] = m[:3,3]
    ## scale is vector-length of first three columns
    scale = (m[:3,:3]**2).sum(axis=0)**0.5
    ## see whether there is an inversion
    z = np.cross(m[0, :3], m[1, :3])
    if np.dot(z, m[2, :3]) < 0:
        scale[1] *= -1 ## doesn't really matter which axis we invert
    self._state['scale'] = scale
    ## rotation axis is the eigenvector with eigenvalue=1
    # Dividing out the scale leaves a pure rotation matrix r.
    r = m[:3, :3] / scale[:, np.newaxis]
    try:
        evals, evecs = scipy.linalg.eig(r)
    except:
        # Dump the inputs before re-raising so failures are diagnosable.
        print("Rotation matrix: %s" % str(r))
        print("Scale: %s" % str(scale))
        print("Original matrix: %s" % str(m))
        raise
    # A proper rotation has exactly one (real) eigenvalue equal to 1;
    # its eigenvector is the rotation axis.
    eigIndex = np.argwhere(np.abs(evals-1) < 1e-6)
    if len(eigIndex) < 1:
        print("eigenvalues: %s" % str(evals))
        print("eigenvectors: %s" % str(evecs))
        print("index: %s, %s" % (str(eigIndex), str(evals-1)))
        raise Exception("Could not determine rotation axis.")
    axis = evecs[:,eigIndex[0,0]].real
    axis /= ((axis**2).sum())**0.5  # normalize to unit length
    self._state['axis'] = axis
    ## trace(r) == 2 cos(angle) + 1, so:
    cos = (r.trace()-1)*0.5 ## this only gets us abs(angle)
    ## The off-diagonal values can be used to correct the angle ambiguity,
    ## but we need to figure out which element to use:
    axisInd = np.argmax(np.abs(axis))
    rInd,sign = [((1,2), -1), ((0,2), 1), ((0,1), -1)][axisInd]
    ## Then we have r-r.T = sin(angle) * 2 * sign * axis[axisInd];
    ## solve for sin(angle)
    sin = (r-r.T)[rInd] / (2. * sign * axis[axisInd])
    ## finally, we get the complete angle from arctan(sin/cos)
    self._state['angle'] = np.arctan2(sin, cos) * 180 / np.pi
    # Degenerate case: zero rotation gets the canonical z axis.
    if self._state['angle'] == 0:
        self._state['axis'] = (0,0,1)
def as2D(self):
    """Return a pg.SRTTransform representing the x,y portion of this
    transform (only meaningful if the transform is essentially 2D)."""
    return pg.SRTTransform(self)
#def __div__(self, t):
#"""A / B == B^-1 * A"""
#dt = t.inverted()[0] * self
#return SRTTransform(dt)
#def __mul__(self, t):
#return SRTTransform(QtGui.QTransform.__mul__(self, t))
def saveState(self):
    """Serialize this transform to a dict of plain tuples and floats,
    suitable for restoreState()."""
    state = self._state
    pos = state['pos']
    scale = state['scale']
    axis = state['axis']
    return {
        'pos': (pos[0], pos[1], pos[2]),
        'scale': (scale[0], scale[1], scale[2]),
        'angle': state['angle'],
        'axis': (axis[0], axis[1], axis[2]),
    }
def restoreState(self, state):
    """Load transform parameters from *state*, substituting identity
    defaults for any missing keys (pos/scale/angle/axis)."""
    self._state['pos'] = Vector(state.get('pos', (0.,0.,0.)))
    scale = tuple(state.get('scale', (1.,1.,1.)))
    ## pad a 2-component scale out to 3 components with unit z
    self._state['scale'] = Vector(scale + (1.,) * (3 - len(scale)))
    self._state['angle'] = state.get('angle', 0.)
    self._state['axis'] = state.get('axis', (0, 0, 1))
    self.update()
def update(self):
    """Rebuild the underlying matrix from the current pos/angle/axis/scale state."""
    pg.Transform3D.setToIdentity(self)
    ## modifications to the transform are multiplied on the right, so we need to reverse order here.
    pg.Transform3D.translate(self, *self._state['pos'])
    pg.Transform3D.rotate(self, self._state['angle'], *self._state['axis'])
    pg.Transform3D.scale(self, *self._state['scale'])
def __repr__(self):
    # Delegate to saveState() so the repr shows the decomposed state dict.
    return str(self.saveState())
def matrix(self, nd=3):
    """Return this transform as a numpy array.

    nd=3 gives the full 4x4 matrix; nd=2 collapses the z axis and
    returns the equivalent 3x3 2D matrix.
    """
    full = np.array(self.copyDataTo()).reshape(4,4)
    if nd == 3:
        return full
    elif nd == 2:
        ## fold the translation column/row across the discarded z axis
        full[2] = full[3]
        full[:,2] = full[:,3]
        return full[:3,:3]
    else:
        raise Exception("Argument 'nd' must be 2 or 3")
if __name__ == '__main__':
    ## Interactive demo: a draggable/rotatable TestROI whose global
    ## transform is mirrored onto a crosshair item in the scene.
    import widgets
    import GraphicsView
    from functions import *
    app = QtGui.QApplication([])
    win = QtGui.QMainWindow()
    win.show()
    cw = GraphicsView.GraphicsView()
    #cw.enableMouse()
    win.setCentralWidget(cw)
    s = QtGui.QGraphicsScene()
    cw.setScene(s)
    win.resize(600,600)
    cw.enableMouse()
    cw.setRange(QtCore.QRectF(-100., -100., 200., 200.))

    class Item(QtGui.QGraphicsItem):
        ## Composite marker: yellow rect, red "R" label, and a crosshair.
        def __init__(self):
            QtGui.QGraphicsItem.__init__(self)
            self.b = QtGui.QGraphicsRectItem(20, 20, 20, 20, self)
            self.b.setPen(QtGui.QPen(mkPen('y')))
            self.t1 = QtGui.QGraphicsTextItem(self)
            self.t1.setHtml('<span style="color: #F00">R</span>')
            self.t1.translate(20, 20)
            self.l1 = QtGui.QGraphicsLineItem(10, 0, -10, 0, self)
            self.l2 = QtGui.QGraphicsLineItem(0, 10, 0, -10, self)
            self.l1.setPen(QtGui.QPen(mkPen('y')))
            self.l2.setPen(QtGui.QPen(mkPen('y')))
        def boundingRect(self):
            ## drawing is delegated entirely to the child items
            return QtCore.QRectF()
        def paint(self, *args):
            pass

    #s.addItem(b)
    #s.addItem(t1)
    item = Item()
    s.addItem(item)
    l1 = QtGui.QGraphicsLineItem(10, 0, -10, 0)
    l2 = QtGui.QGraphicsLineItem(0, 10, 0, -10)
    l1.setPen(QtGui.QPen(mkPen('r')))
    l2.setPen(QtGui.QPen(mkPen('r')))
    s.addItem(l1)
    s.addItem(l2)

    ## Exercise construction, composition and division of transforms.
    tr1 = SRTTransform()
    tr2 = SRTTransform()
    tr3 = QtGui.QTransform()
    tr3.translate(20, 0)
    tr3.rotate(45)
    print("QTransform -> Transform: %s" % str(SRTTransform(tr3)))
    print("tr1: %s" % str(tr1))
    tr2.translate(20, 0)
    tr2.rotate(45)
    print("tr2: %s" % str(tr2))
    dt = tr2/tr1
    print("tr2 / tr1 = %s" % str(dt))
    print("tr2 * tr1 = %s" % str(tr2*tr1))
    tr4 = SRTTransform()
    tr4.scale(-1, 1)
    tr4.rotate(30)
    print("tr1 * tr4 = %s" % str(tr1*tr4))

    w1 = widgets.TestROI((19,19), (22, 22), invertible=True)
    #w2 = widgets.TestROI((0,0), (150, 150))
    w1.setZValue(10)
    s.addItem(w1)
    #s.addItem(w2)
    w1Base = w1.getState()
    #w2Base = w2.getState()

    def update():
        ## mirror the ROI's global transform onto the crosshair item
        tr1 = w1.getGlobalTransform(w1Base)
        #tr2 = w2.getGlobalTransform(w2Base)
        item.setTransform(tr1)

    #def update2():
        #tr1 = w1.getGlobalTransform(w1Base)
        #tr2 = w2.getGlobalTransform(w2Base)
        #t1.setTransform(tr1)
        #w1.setState(w1Base)
        #w1.applyGlobalTransform(tr2)

    w1.sigRegionChanged.connect(update)
    #w2.sigRegionChanged.connect(update2)
|
|
# -*- coding: utf-8 -*-
# vim: set ts=4 et
import os
import re
import sqlite3
from plugin import *
from . import expression
# Storage type tags for the Vars table's Type column; used to restore a
# variable's Python type when a workbook is reloaded from sqlite.
VAR_TYPE_INT = 0
VAR_TYPE_FLOAT = 1
VAR_TYPE_COMPLEX = 2
VAR_TYPE_STR = 3
class Plugin(BasePlugin):
    """Calculator plugin with per-target, sqlite-backed "workbooks".

    A workbook is a named collection of user-defined variables and
    functions.  Definitions are evaluated by the companion ``expression``
    module and persisted to math.db so they survive restarts.
    """

    def on_load(self, reload):
        """Open the database and create the schema if it does not exist."""
        self.db = sqlite3.connect(os.path.join(self.bot.core.data_path, 'math.db'))
        c = self.db.cursor()
        c.execute('''
            CREATE TABLE IF NOT EXISTS Workbook (
                WorkbookId INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
                Name VARCHAR(100) NOT NULL
            );''')
        c.execute('''
            CREATE TABLE IF NOT EXISTS Funcs (
                WorkbookId INTEGER NOT NULL REFERENCES Workbook(WorkbookId),
                Name VARCHAR(100) NOT NULL,
                Args VARCHAR(100) NOT NULL,
                Expr VARCHAR(100) NOT NULL,
                Desc VARCHAR(100)
            );''')
        c.execute('''
            CREATE TABLE IF NOT EXISTS Vars (
                WorkbookId INTEGER NOT NULL REFERENCES Workbook(WorkbookId),
                Name VARCHAR(100) NOT NULL,
                Type INTEGER NOT NULL,
                Value VARCHAR(100) NOT NULL
            );''')
        self.db.commit()
        # workbook name -> workbook dict; chat target -> its active workbook
        self.workbooks = {}
        self.target_to_workbook = {}

    def on_unload(self, reload):
        """Release the database handle."""
        self.db.close()

    def load_workbook(self, target, name):
        """Load (or create) workbook *name* and make it active for *target*.

        Unqualified names are namespaced per target as 'target:name'.
        """
        if ':' not in name:
            name = target + ':' + name
        if name in self.workbooks:
            # Already cached: just switch the target over to it.
            self.target_to_workbook[target] = self.workbooks[name]
            return self.target_to_workbook[target]
        self.target_to_workbook[target] = self.workbooks[name] = workbook = {}
        workbook['name'] = name
        workbook['exc_handler'] = self.exc_handler
        workbook['globals'] = expression.BUILTIN_VARS.copy()
        workbook['funcs'] = expression.BUILTIN_FUNCS.copy()
        c = self.db.cursor()
        # Named sqlite parameters (:name, :id) are pulled from the workbook
        # dict itself; extra keys are ignored by the driver.
        c.execute('SELECT WorkbookId FROM Workbook WHERE Name=:name', workbook)
        row = c.fetchone()
        if not row:
            # Brand-new workbook: register it; nothing further to load.
            c.execute('INSERT INTO Workbook (Name) ' \
                      'VALUES (:name)', workbook)
            self.db.commit()
            workbook['id'] = c.lastrowid
            return workbook
        workbook['id'] = row[0]
        # Restore persisted variables with their original Python types.
        c.execute('SELECT Name, Type, Value FROM Vars WHERE WorkbookId=:id', workbook)
        for row in c.fetchall():
            name, type_, value = row
            if type_ == VAR_TYPE_INT:
                value = int(value)
            elif type_ == VAR_TYPE_FLOAT:
                value = float(value)
            elif type_ == VAR_TYPE_COMPLEX:
                value = complex(value)
            else:
                value = str(value)
            workbook['globals'][str(name)] = value
        # Re-compile persisted function definitions.
        c.execute('SELECT Name, Args, Expr, Desc FROM Funcs WHERE WorkbookId=:id', workbook)
        for row in c.fetchall():
            name, args, expr, desc = str(row[0]), str(row[1]), str(row[2]), str(row[3])
            expression.define_func(workbook, name, args, expr, desc)
        return workbook

    def get_workbook(self, target):
        """Return the active workbook for *target*, defaulting to 'Default'."""
        if target in self.target_to_workbook:
            return self.target_to_workbook[target]
        return self.load_workbook(target, 'Default')

    def exc_handler(self, name, args, exc, workbook, expr):
        """Report an expression error, pointing a caret at the failing position.

        NOTE(review): relies on self.lastmsg, which is only assigned in
        math_trigger -- confirm no other path can reach this first.
        """
        self.lastmsg.reply('Error: ' + str(exc))
        if expr:
            self.lastmsg.reply(' ' + expr)
            self.lastmsg.reply(' ' * (exc.pos + 2) + '^')

    def define_func(self, msg, workbook, name, args, expr):
        """Define function *name* in *workbook* and persist it."""
        try:
            expression.define_func(workbook, name, args, expr)
            c = self.db.cursor()
            c.execute('INSERT INTO Funcs VALUES (?, ?, ?, ?, ?)', \
                      (workbook['id'], name, args, expr, ''))
            self.db.commit()
        except expression.DeclarationError as exc:
            msg.reply('Error: ' + str(exc))
        except expression.ExpressionError as exc:
            self.exc_handler('', [], exc, workbook, expr)

    def undefine_func(self, msg, workbook, name):
        """Remove function *name* from *workbook* and from the database."""
        try:
            expression.undefine_func(workbook, name)
            c = self.db.cursor()
            c.execute('DELETE FROM Funcs WHERE WorkbookId=? and Name=?', \
                      (workbook['id'], name))
            self.db.commit()
        except expression.DeclarationError as exc:
            msg.reply('Error: ' + str(exc))

    def define_var(self, msg, workbook, name, expr):
        """Evaluate *expr*, bind the result to *name* and persist it."""
        try:
            expression.define_var(workbook, name, expr)
            value = workbook['globals'][name]
            # Tag the value so load_workbook() can restore its Python type.
            # (Was ``type(value) in [int, int]`` -- a duplicated entry left
            # over from the Python 2 ``[int, long]`` check.)
            if type(value) is int:
                type_ = VAR_TYPE_INT
            elif type(value) == float:
                type_ = VAR_TYPE_FLOAT
            elif type(value) == complex:
                type_ = VAR_TYPE_COMPLEX
            else:
                type_ = VAR_TYPE_STR
            c = self.db.cursor()
            c.execute('INSERT INTO Vars VALUES (?, ?, ?, ?)', (workbook['id'], \
                      name, type_, str(value)))
            self.db.commit()
        except expression.DeclarationError as exc:
            msg.reply('Error: ' + str(exc))
        except expression.ExpressionError as exc:
            self.exc_handler('', [], exc, workbook, expr)

    def undefine_var(self, msg, workbook, name):
        """Remove variable *name* from *workbook* and from the database."""
        try:
            expression.undefine_var(workbook, name)
            c = self.db.cursor()
            c.execute('DELETE FROM Vars WHERE WorkbookId=? and Name=?', \
                      (workbook['id'], name))
            self.db.commit()
        except expression.DeclarationError as exc:
            msg.reply('Error: ' + str(exc))

    @hook
    def math_trigger(self, msg, args, argstr):
        """Main entry point: a definition or an expression to evaluate."""
        self.lastmsg = msg
        workbook = self.get_workbook(msg.reply_to)
        line = str(argstr).strip()
        # "name(args) = expr" (re)defines a function; empty expr deletes it.
        m = re.match(r'(\w+)\(([\w, ]*)\)\s*=\s*(.*)', line)
        if m:
            name, args, expr = m.groups()
            expr = expr.strip()
            self.undefine_func(msg, workbook, name)
            if expr:
                self.define_func(msg, workbook, name, args, expr)
            return
        # "name = expr" (re)defines a variable; empty expr deletes it.
        m = re.match(r'(\w+)\s*=\s*(.*)', line)
        if m:
            name, expr = m.groups()
            expr = expr.strip()
            self.undefine_var(msg, workbook, name)
            if expr:
                self.define_var(msg, workbook, name, expr)
            return
        # Otherwise: evaluate the whole line as an expression.
        try:
            expr = line
            tokens = expression.parse_expr(expr)
            compiled = expression.compile_expr(tokens)
            value = compiled(workbook)
        except expression.ExpressionError as exc:
            self.exc_handler('', [], exc, workbook, expr)
            return
        except Exception as exc:
            msg.reply('Error: ' + str(exc))
            return
        msg.reply(str(value))

    @hook
    def math_workbook_trigger(self, msg, args, argstr):
        """Show the current workbook, or switch to the named one."""
        if len(args) <= 1:
            workbook = self.get_workbook(msg.reply_to)
            msg.reply("%s workbook, %d vars, %d funcs" % (workbook['name'], \
                      len(workbook['globals']), len(workbook['funcs'])))
            return True
        self.load_workbook(msg.reply_to, args[1])
        return True

    @hook
    def math_varlist_trigger(self, msg, args, argstr):
        """List the variable names defined in the current workbook."""
        workbook = self.get_workbook(msg.reply_to)
        names = sorted(workbook.get('globals', {}).keys())
        msg.reply(', '.join(names))
        return True

    @hook
    def math_funclist_trigger(self, msg, args, argstr):
        """List the function names defined in the current workbook."""
        workbook = self.get_workbook(msg.reply_to)
        names = sorted(workbook.get('funcs', {}).keys())
        msg.reply(', '.join(names))
        return True

    @hook
    def math_describe_trigger(self, msg, args, argstr):
        """Show or set the description of a user-defined function."""
        workbook = self.get_workbook(msg.reply_to)
        if len(args) < 2:
            msg.reply('a func name is required')
            return True
        name = args[1]
        funcs = workbook.get('funcs', {})
        if name not in funcs:
            msg.reply(name + ' is not a defined func')
            return True
        func = funcs[name]
        if len(args) < 3:
            # No new description given: print the current definition.
            if 'expr' in func:
                args = func.get('args', ())
                msg.reply('%s(%s) = %s' % (name, ', '.join(args), func['expr']))
            else:
                msg.reply(name + ' is a python func')
            if 'desc' in func:
                msg.reply(func['desc'])
            return True
        # Everything after the func name becomes the new description.
        func['desc'] = argstr[len(args[1]):].strip()
        c = self.db.cursor()
        c.execute('UPDATE Funcs SET Desc=? WHERE WorkbookId=? and Name=?', \
                  (func['desc'], workbook['id'], name))
        self.db.commit()
        return True
|
|
# -*- coding: utf-8 -*-
import pytest
from ethereum import tester
from ethereum.utils import encode_hex, sha3
from raiden.utils import get_contract_path, privatekey_to_address
from raiden.encoding.signing import GLOBAL_CTX
from ethereum.tester import ABIContract, ContractTranslator, TransactionFailed
from secp256k1 import PrivateKey
from raiden.tests.utils.tester import new_channelmanager
def test_channelnew_event(
        settle_timeout,
        tester_state,
        tester_events,
        tester_registry,
        tester_token):
    """newChannel() must fire a ChannelNew event naming both participants.

    All arguments are pytest fixtures supplied by the raiden tester conftest.
    """
    privatekey0 = tester.DEFAULT_KEY
    address0 = tester.DEFAULT_ACCOUNT
    address1 = tester.a1

    channel_manager = new_channelmanager(
        privatekey0,
        tester_state,
        tester_events.append,
        tester_registry,
        tester_token,
    )

    netting_channel_address1_hex = channel_manager.newChannel(
        address1,
        settle_timeout,
        sender=privatekey0,
    )

    # The log listener appends events in order; ChannelNew must be last.
    last_event = tester_events[-1]
    assert last_event == {
        '_event_type': 'ChannelNew',
        'netting_channel': netting_channel_address1_hex,
        'participant1': encode_hex(address0),
        'participant2': encode_hex(address1),
        'settle_timeout': settle_timeout,
    }
def test_channelmanager(
        tester_state,
        tester_token,
        tester_events,
        tester_channelmanager_library_address,
        settle_timeout,
        netting_channel_abi):  # pylint: disable=too-many-locals,too-many-statements
    """End-to-end checks of ChannelManagerContract: deployment, channel
    creation, duplicate/low-timeout rejection, and participant queries.

    NOTE(review): each call below mutates the tester chain state, so
    statement order is significant; code left byte-identical.
    """
    address0 = tester.DEFAULT_ACCOUNT
    address1 = tester.a1
    address2 = tester.a2
    nonexisting_address = sha3('this_does_not_exist')[:20]

    channelmanager_path = get_contract_path('ChannelManagerContract.sol')
    channel_manager = tester_state.abi_contract(
        None,
        path=channelmanager_path,
        language='solidity',
        constructor_parameters=[tester_token.address],
        contract_name='ChannelManagerContract',
        log_listener=tester_events.append,
        libraries={
            'ChannelManagerLibrary': tester_channelmanager_library_address.encode('hex'),
        }
    )

    participants_count = len(channel_manager.getChannelsParticipants())
    assert participants_count == 0, 'newly deployed contract must be empty'

    netting_channel_translator = ContractTranslator(netting_channel_abi)

    # Opening the first channel must add exactly one ChannelNew event.
    previous_events = list(tester_events)
    netting_channel_address1_hex = channel_manager.newChannel(
        address1,
        settle_timeout,
    )
    assert len(previous_events) + 1 == len(tester_events), 'ChannelNew event must be fired.'

    channelnew_event = tester_events[-1]
    assert channelnew_event == {
        '_event_type': 'ChannelNew',
        'participant1': address0.encode('hex'),
        'participant2': address1.encode('hex'),
        'netting_channel': netting_channel_address1_hex,
        'settle_timeout': settle_timeout,
    }

    # should fail if settleTimeout is too low
    with pytest.raises(TransactionFailed):
        channel_manager.newChannel(address1, 5)

    # cannot have two channels at the same time
    with pytest.raises(TransactionFailed):
        channel_manager.newChannel(address1, settle_timeout)

    # should be zero address if there is no channel for the given address
    assert channel_manager.getChannelWith(nonexisting_address) == '0' * 40

    assert len(channel_manager.getChannelsParticipants()) == 2

    netting_contract_proxy1 = ABIContract(
        tester_state,
        netting_channel_translator,
        netting_channel_address1_hex,
    )

    assert netting_contract_proxy1.settleTimeout() == settle_timeout

    # A second channel with a different partner must also fire ChannelNew.
    previous_events = list(tester_events)
    netting_channel_address2_hex = channel_manager.newChannel(
        address2,
        settle_timeout,
    )
    assert len(previous_events) + 1 == len(tester_events), 'ChannelNew event must be fired.'

    assert channel_manager.getChannelWith(address1) == netting_channel_address1_hex
    assert channel_manager.getChannelWith(address2) == netting_channel_address2_hex

    msg_sender_channels = channel_manager.nettingContractsByAddress(tester.DEFAULT_ACCOUNT)
    address1_channels = channel_manager.nettingContractsByAddress(address1)
    nonexisting_channels = channel_manager.nettingContractsByAddress(nonexisting_address)

    assert len(msg_sender_channels) == 2
    assert len(address1_channels) == 1
    assert len(nonexisting_channels) == 0

    assert len(channel_manager.getChannelsParticipants()) == 4

    channelnew_event = tester_events[-1]
    assert channelnew_event == {
        '_event_type': 'ChannelNew',
        'participant1': address0.encode('hex'),
        'participant2': address2.encode('hex'),
        'netting_channel': netting_channel_address2_hex,
        'settle_timeout': settle_timeout,
    }
def test_reopen_channel(
        tester_state,
        tester_events,
        tester_channelmanager,
        tester_channels,
        settle_timeout,
        netting_channel_abi):
    """After close+settle, a channel with the same partner can be re-opened;
    the stale entry is deleted (ChannelDeleted) and old transfers rejected.

    NOTE(review): the statement order mirrors the on-chain channel
    lifecycle and must not be rearranged; code left byte-identical.
    """
    privatekey0_raw, privatekey1_raw, nettingchannel, channel0, _ = tester_channels[0]
    privatekey0 = PrivateKey(privatekey0_raw, ctx=GLOBAL_CTX, raw=True)
    address0 = privatekey_to_address(privatekey0_raw)
    address1 = privatekey_to_address(privatekey1_raw)
    address2 = tester.a2

    # We need to close the channel before it can be deleted, to do so we need
    # one transfer to pass in close()
    transfer_amount = 10
    identifier = 1
    direct_transfer = channel0.create_directtransfer(
        transfer_amount,
        identifier,
    )
    direct_transfer.sign(privatekey0, address0)
    direct_transfer_data = str(direct_transfer.packed().data)

    # The transfer nonce encodes the channel's opened-block number in its
    # upper bits; check it falls inside the expected window.
    should_be_nonce = nettingchannel.opened(sender=privatekey0_raw) * (2**32)
    should_be_nonce_plus_one = (nettingchannel.opened(sender=privatekey0_raw) + 1) * (2**32)
    assert should_be_nonce <= direct_transfer.nonce < should_be_nonce_plus_one

    # settle the channel should not change the channel manager state
    nettingchannel.close(
        direct_transfer_data,
        "",
        sender=privatekey1_raw,
    )
    tester_state.mine(number_of_blocks=settle_timeout + 1)
    nettingchannel.settle(sender=privatekey0_raw)
    tester_state.mine(1)

    # now a single new channel can be opened
    # if channel with address is settled a new can be opened
    # old entry will be deleted when calling newChannel
    netting_channel_address1_hex = tester_channelmanager.newChannel(
        address1,
        settle_timeout,
    sender=privatekey0_raw,
    )

    # ChannelDeleted for the settled channel precedes the new ChannelNew.
    channeldelete_event = tester_events[-2]
    assert channeldelete_event == {
        '_event_type': 'ChannelDeleted',
        'caller_address': address0.encode('hex'),
        'partner': address1.encode('hex')
    }

    netting_channel_translator = ContractTranslator(netting_channel_abi)

    netting_contract_proxy1 = ABIContract(
        tester_state,
        netting_channel_translator,
        netting_channel_address1_hex,
    )

    # transfer not in nonce range
    with pytest.raises(TransactionFailed):
        netting_contract_proxy1.close(
            direct_transfer_data,
            "",
            sender=privatekey0_raw,
        )

    # channel already exists
    with pytest.raises(TransactionFailed):
        tester_channelmanager.newChannel(
            address1,
            settle_timeout,
            sender=privatekey0_raw,
        )

    # opening a new channel that did not exist before
    tester_channelmanager.newChannel(
        address2,
        settle_timeout,
        sender=privatekey0_raw,
    )
|
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The VCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listtransactions API
from test_framework.test_framework import VCoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction
import cStringIO
import binascii
def txFromHex(hexstring):
    """Deserialize a hex-encoded raw transaction into a CTransaction."""
    raw_bytes = binascii.unhexlify(hexstring)
    tx = CTransaction()
    tx.deserialize(cStringIO.StringIO(raw_bytes))
    return tx
def check_array_result(object_array, to_match, expected):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.
    """
    num_matched = 0
    for item in object_array:
        # Evaluate every to_match key (no short-circuit) so a missing key
        # raises KeyError exactly as the original nested loop did.
        hits = [item[key] == value for key, value in to_match.items()]
        if not all(hits):
            continue
        # Matching item: every expected key/value must agree.
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
        num_matched = num_matched + 1
    if num_matched == 0:
        raise AssertionError("No objects matched %s" % (str(to_match)))
class ListTransactionsTest(VCoinTestFramework):
    """Exercise the listtransactions RPC: plain sends, send-to-self,
    sendmany, watchonly accounts, and BIP125 replaceability flags.

    NOTE(review): every RPC call mutates node/wallet state, so the
    statement order is significant; code left byte-identical.
    """

    def run_test(self):
        # Simple send, 0 to 1:
        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        self.sync_all()
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid},
                           {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
        check_array_result(self.nodes[1].listtransactions(),
                           {"txid":txid},
                           {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
        # mine a block, confirmations should change:
        self.nodes[0].generate(1)
        self.sync_all()
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid},
                           {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
        check_array_result(self.nodes[1].listtransactions(),
                           {"txid":txid},
                           {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
        # send-to-self:
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid, "category":"send"},
                           {"amount":Decimal("-0.2")})
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid, "category":"receive"},
                           {"amount":Decimal("0.2")})
        # sendmany from node1: twice to self, twice to node2:
        send_to = { self.nodes[0].getnewaddress() : 0.11,
                    self.nodes[1].getnewaddress() : 0.22,
                    self.nodes[0].getaccountaddress("from1") : 0.33,
                    self.nodes[1].getaccountaddress("toself") : 0.44 }
        txid = self.nodes[1].sendmany("", send_to)
        self.sync_all()
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.11")},
                           {"txid":txid} )
        check_array_result(self.nodes[0].listtransactions(),
                           {"category":"receive","amount":Decimal("0.11")},
                           {"txid":txid} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.22")},
                           {"txid":txid} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"receive","amount":Decimal("0.22")},
                           {"txid":txid} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.33")},
                           {"txid":txid} )
        check_array_result(self.nodes[0].listtransactions(),
                           {"category":"receive","amount":Decimal("0.33")},
                           {"txid":txid, "account" : "from1"} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.44")},
                           {"txid":txid, "account" : ""} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"receive","amount":Decimal("0.44")},
                           {"txid":txid, "account" : "toself"} )
        # Watch-only: imported multisig shows up only with includeWatchonly.
        multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
        self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
        txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
        self.nodes[1].generate(1)
        self.sync_all()
        assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0)
        check_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True),
                           {"category":"receive","amount":Decimal("0.1")},
                           {"txid":txid, "account" : "watchonly"} )

        self.run_rbf_opt_in_test()

    # Check that the opt-in-rbf flag works properly, for sent and received
    # transactions.
    def run_rbf_opt_in_test(self):
        # Check whether a transaction signals opt-in RBF itself
        def is_opt_in(node, txid):
            rawtx = node.getrawtransaction(txid, 1)
            for x in rawtx["vin"]:
                if x["sequence"] < 0xfffffffe:
                    return True
            return False

        # Find an unconfirmed output matching a certain txid
        def get_unconfirmed_utxo_entry(node, txid_to_match):
            utxo = node.listunspent(0, 0)
            for i in utxo:
                if i["txid"] == txid_to_match:
                    return i
            return None

        # 1. Chain a few transactions that don't opt-in.
        txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
        assert(not is_opt_in(self.nodes[0], txid_1))
        check_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
        sync_mempools(self.nodes)
        check_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
        # Tx2 will build off txid_1, still not opting in to RBF.
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)

        # Create tx2 using createrawtransaction
        inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}]
        outputs = {self.nodes[0].getnewaddress(): 0.999}
        tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
        tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"]
        txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)

        # ...and check the result
        assert(not is_opt_in(self.nodes[1], txid_2))
        check_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
        sync_mempools(self.nodes)
        check_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})

        # Tx3 will opt-in to RBF
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
        inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}]
        outputs = {self.nodes[1].getnewaddress(): 0.998}
        tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
        tx3_modified = txFromHex(tx3)
        # nSequence 0 (< 0xfffffffe) signals BIP125 replaceability.
        tx3_modified.vin[0].nSequence = 0
        tx3 = binascii.hexlify(tx3_modified.serialize()).decode('utf-8')
        tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex']
        txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)

        assert(is_opt_in(self.nodes[0], txid_3))
        check_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
        sync_mempools(self.nodes)
        check_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})

        # Tx4 will chain off tx3. Doesn't signal itself, but depends on one
        # that does.
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
        inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}]
        outputs = {self.nodes[0].getnewaddress(): 0.997}
        tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
        tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"]
        txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)

        assert(not is_opt_in(self.nodes[1], txid_4))
        check_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
        sync_mempools(self.nodes)
        check_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})

        # Replace tx3, and check that tx4 becomes unknown
        tx3_b = tx3_modified
        # NOTE(review): float arithmetic on the integer nValue field --
        # presumably tolerated by serialize(); confirm against CTransaction.
        tx3_b.vout[0].nValue -= 0.004*100000000 # bump the fee
        tx3_b = binascii.hexlify(tx3_b.serialize()).decode('utf-8')
        tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex']
        txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
        assert(is_opt_in(self.nodes[0], txid_3b))

        check_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
        sync_mempools(self.nodes)
        check_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})

        # Check gettransaction as well:
        for n in self.nodes[0:2]:
            assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
            assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
            assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
            assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
            assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")

        # After mining a transaction, it's no longer BIP125-replaceable
        self.nodes[0].generate(1)
        assert(txid_3b not in self.nodes[0].getrawmempool())
        assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
        assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
if __name__ == '__main__':
    # Standard test-framework entry point.
    ListTransactionsTest().main()
|
|
# Copyright (c) 2006-2009 The Trustees of Indiana University.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# - Neither the Indiana University nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import array
import subprocess
import sys
import corepy.arch.x86.isa as x86
from corepy.arch.x86.types.registers import *
import corepy.arch.x86.platform as env
from corepy.arch.x86.lib.memory import MemRef
import corepy.lib.printer as printer
def get_nasm_output(code, inst):
    """Assemble *code* with NASM and return the hex string of its body encoding.

    Returns None if NASM fails or the expected prologue/epilogue byte
    patterns cannot be found in the assembled output.  (Docstring fix:
    this runs NASM, not GAS.)
    """
    # Context manager guarantees the file is flushed/closed before NASM runs.
    with open("x86_test.s", "w") as fd:
        printer.PrintInstructionStream(code, printer.x86_Nasm(function_name="_start"), fd = fd)

    ret = subprocess.call(["nasm", "-Ox", "x86_test.s"])
    if ret != 0:
        return None

    output = subprocess.Popen(["xxd", "-ps", "x86_test"], stdout=subprocess.PIPE).communicate()[0]
    # renamed from 'hex' to avoid shadowing the builtin
    hexstr = ''.join(output.splitlines())

    # If the prolog/epilog change, these need to be updated
    startstr = "5589e5575653"
    stopstr = "5b5e5fc9c3"
    startpos = hexstr.find(startstr)
    stoppos = hexstr.find(stopstr)
    # Previously a missing marker (find() == -1) silently produced a garbage
    # slice; treat it as an assembler-output mismatch instead.
    if startpos == -1 or stoppos == -1:
        return None
    return hexstr[startpos + len(startstr):stoppos]
def get_corepy_output(code, inst):
    """Take an instruction, and return a hex string of its encoding, as encoded by CorePy"""
    encoded_bytes = inst.render()
    return "".join("%02x" % (byte,) for byte in encoded_bytes)
def ops_from_sig(code, sig):
    """Build a concrete operand list matching the operand signature *sig*.

    Each abstract operand type in the signature is mapped to one
    representative value (register, memory reference, immediate, or
    label) so the instruction can be instantiated and encoded.
    """
    ops = []
    for s in sig:
        if isinstance(s, x86.x86RegisterOperand):
            # One fixed representative register per register class.
            if s == x86.reg32_t:
                ops.append(edx)
            elif s == x86.reg16_t:
                ops.append(cx)
            elif s == x86.reg8_t:
                ops.append(bl)
            elif s == x86.regst_t:
                ops.append(st1)
            elif s == x86.mmx_t:
                ops.append(mm3)
            elif s == x86.xmm_t:
                ops.append(xmm5)
        elif isinstance(s, x86.FixedRegisterOperand):
            # The operand names a specific register; look it up by name.
            ops.append(globals()[s.name])
        elif isinstance(s, x86.x86ConstantOperand):
            ops.append(s.const)
        elif isinstance(s, x86.x86MemoryOperand):
            # Varied base registers/displacements per operand width.
            if s == x86.mem128_t:
                ops.append(MemRef(eax, -16, data_size = 128))
            elif s == x86.mem64_t:
                ops.append(MemRef(ebx, 32, data_size = 64))
            elif s == x86.mem32_t:
                ops.append(MemRef(ecx, 1024, data_size = 32))
            elif s == x86.mem16_t:
                ops.append(MemRef(esi, data_size = 16))
            elif s == x86.mem8_t:
                ops.append(MemRef(ebp, -8, data_size = 8))
            elif s == x86.mem80_t:
                ops.append(MemRef(edi, data_size = 80))
            elif s == x86.mem_t:
                ops.append(MemRef(edx, data_size = None))
            else:
                ops.append(MemRef(esp, data_size = s.size))
        elif isinstance(s, x86.Imm8):
            ops.append(13)
        elif isinstance(s, x86.Imm16):
            ops.append(10234)
        elif isinstance(s, x86.Imm32):
            ops.append(0x1EADBEEF)
        elif isinstance(s, x86.Rel8off):
            ops.append(4)
        elif isinstance(s, x86.Rel16off):
            ops.append(260)
        elif isinstance(s, x86.Rel32off):
            ops.append(65541)
        elif isinstance(s, x86.x86ImmediateOperand):
            ops.append(21)
        elif isinstance(s, x86.x86LabelOperand):
            ops.append(code.lbl_body)
        else:
            raise Exception("unhandled operand %s" % str(s))
    return ops
def test_inst(code, inst):
    """Encode *inst* with both CorePy and NASM and compare the hex strings.

    Returns a results-dict key: 'pass', 'fail', or 'nasm_fail'.
    (Python 2 source: print statements kept as-is.)
    """
    code.add(inst)
    code.cache_code()
    nasm_hex_str = get_nasm_output(code, inst)
    corepy_hex_str = get_corepy_output(code, inst)

    if nasm_hex_str == None:
        # NASM could not assemble (or output markers were missing).
        print "*************************** NASM ERROR"
        print "corepy output:", corepy_hex_str
        printer.PrintInstructionStream(code,
            printer.x86_Nasm(show_epilogue = False, show_prologue = False))
        return 'nasm_fail'
    elif nasm_hex_str == corepy_hex_str:
        print "PASS"
        return 'pass'
    else:
        #nasm_rex = int(nasm_hex_str[0:2], 16)
        #corepy_rex = int(corepy_hex_str[0:2], 16)
        #if corepy_rex - nasm_rex == 8 and (nasm_rex & 0xF0 == 0x40):
        #  print "WARNING CorePy is enabling 64bit for this inst, NASM is not"
        #  print "nasm output: ", nasm_hex_str
        #  print "corepy output:", corepy_hex_str
        #  return 'rex_pass'
        #else:
        print "*************************** ERROR"
        print "nasm output: ", nasm_hex_str
        print "corepy output:", corepy_hex_str
        printer.PrintInstructionStream(code,
            printer.x86_Nasm(show_epilogue = False, show_prologue = False))
        return 'fail'
    return
# TODO - would like to be able to test multiple values for an operand. ie regs
# that exercise REX differently, and forward/backward labels
# how would this be done?
if __name__ == '__main__':
results = {'pass':0, 'rex_pass':0, 'nasm_fail':0, 'fail':0}
#classes = [getattr(x86, cls) for cls in dir(x86) if isinstance(getattr(x86, cls), type) and issubclass(getattr(x86, cls), (x86.x86DispatchInstruction, x86.x86Instruction))]
classes = []
for obj in dir(x86):
cls = getattr(x86, obj)
if isinstance(cls, type):
if issubclass(cls, (x86.x86DispatchInstruction, x86.x86Instruction)):
if cls != x86.x86DispatchInstruction and cls != x86.x86Instruction:
classes.append(cls)
code = env.InstructionStream()
for c in classes:
if c == x86.int_3:
# No way to write 'int 3' for NASM since it clashes with 'int 3' (heh)
# So just make sure it gets rendered as 0xCC and call it a day
inst = x86.int_3()
code.add(inst)
corepy_hex_str = get_corepy_output(code, inst)
if corepy_hex_str == 'cc':
print "PASS"
results['pass'] += 1
else:
print "*************************** ERROR"
print "corepy output:", corepy_hex_str
results['pass'] += 1
elif issubclass(c, x86.x86DispatchInstruction):
for d in c.dispatch:
code.reset()
ops = ops_from_sig(code, d[0].signature)
inst = c(*ops)
print "Testing instruction:", inst
r = test_inst(code, inst)
results[r] += 1
sys.stdout.flush()
sys.stderr.flush()
elif issubclass(c, x86.x86Instruction):
code.reset()
ops = ops_from_sig(code, c.machine_inst.signature)
inst = c(*ops)
print "Testing instruction:", inst
r = test_inst(code, inst)
results[r] += 1
sys.stdout.flush()
sys.stderr.flush()
print "%d passes %d rex_passes" % (results['pass'], results['rex_pass'])
print "%d failures %d NASM failures" % (results['fail'], results['nasm_fail'])
print "%d total" % (results['pass'] + results['rex_pass'] + results['nasm_fail'] + results['fail'])
|
|
#!/usr/bin/env python
#
# Copyright 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Shared unit test utilities."""
from __future__ import absolute_import
from __future__ import print_function
import contextlib
import sys
from splunklib import six
# Run the test suite on the SDK without installing it.
sys.path.insert(0, '../')
sys.path.insert(0, '../examples')
import splunklib.client as client
from time import sleep
from datetime import datetime, timedelta
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from utils import parse
except ImportError:
raise Exception("Add the SDK repository to your PYTHONPATH to run the examples "
"(e.g., export PYTHONPATH=~/splunk-sdk-python.")
import os
import time
import logging
logging.basicConfig(
filename='test.log',
level=logging.DEBUG,
format="%(asctime)s:%(levelname)s:%(message)s")
class NoRestartRequiredError(Exception):
    """Raised by restartSplunk() when splunkd does not actually require a restart."""
    pass
class WaitTimedOutError(Exception):
    """Raised by wait() when the predicate does not become true before the timeout."""
    pass
def to_bool(x):
    """Convert the string '1' or '0' to True or False respectively.

    :param x: The string to convert.
    :raises ValueError: If *x* is neither '1' nor '0'.
    """
    if x == '1':
        return True
    elif x == '0':
        return False
    else:
        # BUG FIX: the value was previously passed as a second positional
        # argument to ValueError ("...: %s", x), which never formats the
        # message. Interpolate it instead.
        raise ValueError("Not a boolean value: %s" % x)
def tmpname():
    """Return a disposable, collision-resistant entity name.

    The name is 'delete-me-' followed by the current PID and the current
    time (with the decimal point replaced by a dash).
    """
    pid_part = str(os.getpid())
    time_part = str(time.time()).replace('.', '-')
    return 'delete-me-' + pid_part + time_part
def wait(predicate, timeout=60, pause_time=0.5):
    """Poll *predicate* every *pause_time* seconds until it returns true.

    :param predicate: Zero-argument callable returning a truthy value when done.
    :param timeout: Seconds to keep polling before giving up.
    :param pause_time: Seconds to sleep between polls (must be < timeout).
    :raises WaitTimedOutError: If the deadline passes before success.
    """
    assert pause_time < timeout
    started = datetime.now()
    deadline = started + timedelta(seconds=timeout)
    while not predicate():
        if datetime.now() > deadline:
            logging.debug("wait timed out after %d seconds", timeout)
            raise WaitTimedOutError
        sleep(pause_time)
    logging.debug("wait finished after %s seconds", datetime.now() - started)
class SDKTestCase(unittest.TestCase):
    """Base class for tests that talk to a live splunkd instance.

    Connects using credentials from ``.splunkrc``, restarts splunkd whenever
    it reports a pending restart, and uninstalls any apps installed through
    install_app_from_collection() during tear-down.
    """
    # Never set to True in this file; presumably toggled by subclasses -- TODO confirm.
    restart_already_required = False
    # Names of apps installed via install_app_from_collection().
    # NOTE(review): class-level mutable list, so it is shared across instances.
    installedApps = []
    def assertEventuallyTrue(self, predicate, timeout=30, pause_time=0.5,
                             timeout_message="Operation timed out."):
        """Poll *predicate* until true; fail the test after *timeout* seconds."""
        assert pause_time < timeout
        start = datetime.now()
        diff = timedelta(seconds=timeout)
        while not predicate():
            if datetime.now() - start > diff:
                logging.debug("wait timed out after %d seconds", timeout)
                self.fail(timeout_message)
            sleep(pause_time)
        logging.debug("wait finished after %s seconds", datetime.now()-start)
    def check_content(self, entity, **kwargs):
        """Assert entity[k] == str(v) for every keyword argument given."""
        for k, v in six.iteritems(kwargs):
            self.assertEqual(entity[k], str(v))
    def check_entity(self, entity):
        """Sanity-check the standard metadata exposed by an Entity."""
        assert entity is not None
        self.assertTrue(entity.name is not None)
        self.assertTrue(entity.path is not None)
        self.assertTrue(entity.state is not None)
        self.assertTrue(entity.content is not None)
        # Verify access metadata
        assert entity.access is not None
        entity.access.app
        entity.access.owner
        entity.access.sharing
        # Verify content metadata
        # In some cases, the REST API does not return field metadata for when
        # entities are initially listed by a collection, so we refresh to make
        # sure the metadata is available.
        entity.refresh()
        self.assertTrue(isinstance(entity.fields.required, list))
        self.assertTrue(isinstance(entity.fields.optional, list))
        self.assertTrue(isinstance(entity.fields.wildcard, list))
        # Verify that all required fields appear in entity content
        for field in entity.fields.required:
            try:
                self.assertTrue(field in entity.content)
            except:
                # Check for known exceptions
                if "configs/conf-times" in entity.path:
                    if field in ["is_sub_menu"]:
                        continue
                raise
    def clear_restart_message(self):
        """Tell Splunk to forget that it needs to be restarted.
        This is used mostly in cases such as deleting a temporary application.
        Splunk asks to be restarted when that happens, but unless the application
        contained modular input kinds or the like, it isn't necessary.
        """
        if not self.service.restart_required:
            raise ValueError("Tried to clear restart message when there was none.")
        try:
            self.service.delete("messages/restart_required")
        except client.HTTPError as he:
            # 404 means the message was already gone; anything else is real.
            if he.status == 404:
                pass
            else:
                raise
    @contextlib.contextmanager
    def fake_splunk_version(self, version):
        """Context manager: temporarily report *version* as the server version."""
        original_version = self.service.splunk_version
        try:
            self.service._splunk_version = version
            yield
        finally:
            self.service._splunk_version = original_version
    def install_app_from_collection(self, name):
        """Install app *name* from the 'sdkappcollection' test app's build dir.

        Restarts splunkd if the installation requires it, and records the app
        for removal in tearDown().
        """
        collectionName = 'sdkappcollection'
        if collectionName not in self.service.apps:
            raise ValueError("sdk-test-application not installed in splunkd")
        appPath = self.pathInApp(collectionName, ["build", name+".tar"])
        kwargs = {"update": True, "name": appPath, "filename": True}
        try:
            self.service.post("apps/local", **kwargs)
        except client.HTTPError as he:
            if he.status == 400:
                raise IOError("App %s not found in app collection" % name)
        if self.service.restart_required:
            self.service.restart(120)
        self.installedApps.append(name)
    def app_collection_installed(self):
        """Return True if the 'sdkappcollection' app is present on the server."""
        collectionName = 'sdkappcollection'
        return collectionName in self.service.apps
    def pathInApp(self, appName, pathComponents):
        r"""Return a path to *pathComponents* in *appName*.
        `pathInApp` is used to refer to files in applications installed with
        `install_app_from_collection`. For example, the app `file_to_upload` in
        the collection contains `log.txt`. To get the path to it, call::
            pathInApp('file_to_upload', ['log.txt'])
        The path to `setup.xml` in `has_setup_xml` would be fetched with::
            pathInApp('has_setup_xml', ['default', 'setup.xml'])
        `pathInApp` figures out the correct separator to use (based on whether
        splunkd is running on Windows or Unix) and joins the elements in
        *pathComponents* into a path relative to the application specified by
        *appName*.
        *pathComponents* should be a list of strings giving the components.
        This function will try to figure out the correct separator (/ or \)
        for the platform that splunkd is running on and construct the path
        as needed.
        :return: A string giving the path.
        """
        splunkHome = self.service.settings['SPLUNK_HOME']
        if "\\" in splunkHome:
            # This clause must come first, since Windows machines may
            # have mixed \ and / in their paths.
            separator = "\\"
        elif "/" in splunkHome:
            separator = "/"
        else:
            raise ValueError("No separators in $SPLUNK_HOME. Can't determine what file separator to use.")
        appPath = separator.join([splunkHome, "etc", "apps", appName] + pathComponents)
        return appPath
    def uncheckedRestartSplunk(self, timeout=240):
        """Restart splunkd whether or not it says a restart is required."""
        self.service.restart(timeout)
    def restartSplunk(self, timeout=240):
        """Restart splunkd; raise NoRestartRequiredError if none was pending."""
        if self.service.restart_required:
            self.service.restart(timeout)
        else:
            raise NoRestartRequiredError()
    @classmethod
    def setUpClass(cls):
        """Read .splunkrc and make sure splunkd starts in a clean state."""
        cls.opts = parse([], {}, ".splunkrc")
        # Before we start, make sure splunk doesn't need a restart.
        service = client.connect(**cls.opts.kwargs)
        if service.restart_required:
            service.restart(timeout=120)
    def setUp(self):
        """Connect to splunkd and restart it if it reports a pending restart."""
        unittest.TestCase.setUp(self)
        self.service = client.connect(**self.opts.kwargs)
        # If Splunk is in a state requiring restart, go ahead
        # and restart. That way we'll be sane for the rest of
        # the test.
        if self.service.restart_required:
            self.restartSplunk()
        logging.debug("Connected to splunkd version %s", '.'.join(str(x) for x in self.service.splunk_version))
    def tearDown(self):
        """Fail if the test left a restart pending; remove installed apps."""
        from splunklib.binding import HTTPError
        if self.service.restart_required:
            self.fail("Test left Splunk in a state requiring a restart.")
        for appName in self.installedApps:
            if appName in self.service.apps:
                try:
                    self.service.apps.delete(appName)
                    wait(lambda: appName not in self.service.apps)
                except HTTPError as error:
                    # Deleting apps on Windows intermittently 500s; tolerate it.
                    if not (os.name == 'nt' and error.status == 500):
                        raise
                    print('Ignoring failure to delete {0} during tear down: {1}'.format(appName, error))
        if self.service.restart_required:
            self.clear_restart_message()
|
|
# ######################### LICENSE ############################ #
# Copyright (c) 2005-2021, Michele Simionato
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in bytecode form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""
Decorator module, see
https://github.com/micheles/decorator/blob/master/docs/documentation.md
for the documentation.
"""
import re
import sys
import inspect
import operator
import itertools
from contextlib import _GeneratorContextManager
from inspect import getfullargspec, iscoroutinefunction, isgeneratorfunction
__version__ = '5.1.1'
# Matches a "def name(" header so the function name can be pulled out of
# generated source (used by FunctionMaker.make).
DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')
POS = inspect.Parameter.POSITIONAL_OR_KEYWORD  # positional-or-keyword kind
EMPTY = inspect.Parameter.empty  # sentinel meaning "no default"
# this is not used anymore in the core, but kept for backward compatibility
class FunctionMaker(object):
    """
    An object with the ability to create functions with a given signature.
    It has attributes name, doc, module, signature, defaults, dict and
    methods update and make.
    """
    # Atomic get-and-increment provided by the GIL
    _compile_count = itertools.count()
    # make pylint happy; real values are filled in from getfullargspec below
    args = varargs = varkw = defaults = kwonlyargs = kwonlydefaults = ()
    def __init__(self, func=None, name=None, signature=None,
                 defaults=None, doc=None, module=None, funcdict=None):
        # Explicit keyword arguments override whatever is extracted from func.
        self.shortsignature = signature
        if func:
            # func can be a class or a callable, but not an instance method
            self.name = func.__name__
            if self.name == '<lambda>':  # small hack for lambda functions
                self.name = '_lambda_'
            self.doc = func.__doc__
            self.module = func.__module__
            if inspect.isroutine(func):
                argspec = getfullargspec(func)
                self.annotations = getattr(func, '__annotations__', {})
                for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
                          'kwonlydefaults'):
                    setattr(self, a, getattr(argspec, a))
                for i, arg in enumerate(self.args):
                    setattr(self, 'arg%d' % i, arg)
                # Build two renderings of the signature: 'signature' is for the
                # generated def line, 'shortsignature' for forwarding the call.
                allargs = list(self.args)
                allshortargs = list(self.args)
                if self.varargs:
                    allargs.append('*' + self.varargs)
                    allshortargs.append('*' + self.varargs)
                elif self.kwonlyargs:
                    allargs.append('*')  # single star syntax
                for a in self.kwonlyargs:
                    allargs.append('%s=None' % a)
                    allshortargs.append('%s=%s' % (a, a))
                if self.varkw:
                    allargs.append('**' + self.varkw)
                    allshortargs.append('**' + self.varkw)
                self.signature = ', '.join(allargs)
                self.shortsignature = ', '.join(allshortargs)
                self.dict = func.__dict__.copy()
        # func=None happens when decorating a caller
        if name:
            self.name = name
        if signature is not None:
            self.signature = signature
        if defaults:
            self.defaults = defaults
        if doc:
            self.doc = doc
        if module:
            self.module = module
        if funcdict:
            self.dict = funcdict
        # check existence required attributes
        assert hasattr(self, 'name')
        if not hasattr(self, 'signature'):
            raise TypeError('You are decorating a non function: %s' % func)
    def update(self, func, **kw):
        """
        Update the signature of func with the data in self
        """
        func.__name__ = self.name
        func.__doc__ = getattr(self, 'doc', None)
        func.__dict__ = getattr(self, 'dict', {})
        func.__defaults__ = self.defaults
        func.__kwdefaults__ = self.kwonlydefaults or None
        func.__annotations__ = getattr(self, 'annotations', None)
        try:
            # Attribute the generated function to the module of the caller
            # three frames up (the user of FunctionMaker).
            frame = sys._getframe(3)
        except AttributeError:  # for IronPython and similar implementations
            callermodule = '?'
        else:
            callermodule = frame.f_globals.get('__name__', '?')
        func.__module__ = getattr(self, 'module', callermodule)
        func.__dict__.update(kw)
    def make(self, src_templ, evaldict=None, addsource=False, **attrs):
        """
        Make a new function from a given template and update the signature
        """
        src = src_templ % vars(self)  # expand name and signature
        evaldict = evaldict or {}
        mo = DEF.search(src)
        if mo is None:
            raise SyntaxError('not a valid function template\n%s' % src)
        name = mo.group(1)  # extract the function name
        names = set([name] + [arg.strip(' *') for arg in
                              self.shortsignature.split(',')])
        for n in names:
            if n in ('_func_', '_call_'):
                raise NameError('%s is overridden in\n%s' % (n, src))
        if not src.endswith('\n'):  # add a newline for old Pythons
            src += '\n'
        # Ensure each generated function has a unique filename for profilers
        # (such as cProfile) that depend on the tuple of (<filename>,
        # <definition line>, <function name>) being unique.
        filename = '<decorator-gen-%d>' % next(self._compile_count)
        try:
            code = compile(src, filename, 'single')
            exec(code, evaldict)
        except Exception:
            print('Error in generated code:', file=sys.stderr)
            print(src, file=sys.stderr)
            raise
        func = evaldict[name]
        if addsource:
            attrs['__source__'] = src
        self.update(func, **attrs)
        return func
    @classmethod
    def create(cls, obj, body, evaldict, defaults=None,
               doc=None, module=None, addsource=True, **attrs):
        """
        Create a function from the strings name, signature and body.
        evaldict is the evaluation dictionary. If addsource is true an
        attribute __source__ is added to the result. The attributes attrs
        are added, if any.
        """
        if isinstance(obj, str):  # "name(signature)"
            name, rest = obj.strip().split('(', 1)
            signature = rest[:-1]  # strip a right parens
            func = None
        else:  # a function
            name = None
            signature = None
            func = obj
        self = cls(func, name, signature, defaults, doc, module)
        ibody = '\n'.join('    ' + line for line in body.splitlines())
        caller = evaldict.get('_call_')  # when called from `decorate`
        if caller and iscoroutinefunction(caller):
            # Rewrite the template into a coroutine that awaits the caller.
            body = ('async def %(name)s(%(signature)s):\n' + ibody).replace(
                'return', 'return await')
        else:
            body = 'def %(name)s(%(signature)s):\n' + ibody
        return self.make(body, evaldict, addsource, **attrs)
def fix(args, kwargs, sig):
    """Normalize *args*/*kwargs* against the Signature *sig*.

    Returns a (args, kwargs) pair with defaults applied, so positional and
    keyword calling conventions produce the same bound arguments.
    """
    bound = sig.bind(*args, **kwargs)
    bound.apply_defaults()  # needed for test_dan_schult
    return bound.args, bound.kwargs
def decorate(func, caller, extras=(), kwsyntax=False):
    """
    Decorate a function/generator/coroutine *func* using *caller*.

    If kwsyntax is True, calling the decorated function with keyword syntax
    passes the named arguments inside the ``kw`` dictionary, even if such
    arguments are positional, similarly to what functools.wraps does. By
    default kwsyntax is False and the arguments are untouched.
    """
    sig = inspect.signature(func)
    # Pick a wrapper flavor matching the caller: coroutine, generator or plain.
    if iscoroutinefunction(caller):
        async def fun(*args, **kw):
            if not kwsyntax:
                args, kw = fix(args, kw, sig)
            return await caller(func, *(extras + args), **kw)
    elif isgeneratorfunction(caller):
        def fun(*args, **kw):
            if not kwsyntax:
                args, kw = fix(args, kw, sig)
            for item in caller(func, *(extras + args), **kw):
                yield item
    else:
        def fun(*args, **kw):
            if not kwsyntax:
                args, kw = fix(args, kw, sig)
            return caller(func, *(extras + args), **kw)
    fun.__name__ = func.__name__
    fun.__doc__ = func.__doc__
    fun.__wrapped__ = func
    fun.__signature__ = sig
    fun.__qualname__ = func.__qualname__
    # builtin functions like defaultdict.__setitem__ lack many attributes
    for attr in ('__defaults__', '__kwdefaults__', '__annotations__',
                 '__module__'):
        try:
            setattr(fun, attr, getattr(func, attr))
        except AttributeError:
            pass
    try:
        fun.__dict__.update(func.__dict__)
    except AttributeError:
        pass
    return fun
def decoratorx(caller):
    """
    A version of "decorator" implemented via "exec" and not via the
    Signature object. Use this if you want to preserve the `.__code__`
    object properties (https://github.com/micheles/decorator/issues/129).
    """
    def dec(func):
        # Generate a forwarding function with func's exact signature.
        return FunctionMaker.create(
            func,
            "return _call_(_func_, %(shortsignature)s)",
            dict(_call_=caller, _func_=func),
            __wrapped__=func, __qualname__=func.__qualname__)
    return dec
def decorator(caller, _func=None, kwsyntax=False):
    """
    decorator(caller) converts a caller function into a decorator
    """
    if _func is not None:  # return a decorated function
        # this is obsolete behavior; you should use decorate instead
        return decorate(_func, caller, (), kwsyntax)
    # else return a decorator function
    sig = inspect.signature(caller)
    dec_params = [p for p in sig.parameters.values() if p.kind is POS]
    def dec(func=None, *args, **kw):
        # Number of positional arguments consumed so far, counting func.
        na = len(args) + 1
        # Remaining caller parameters with defaults become the extras that
        # are forwarded (positionally) on every call of the decorated func.
        extras = args + tuple(kw.get(p.name, p.default)
                              for p in dec_params[na:]
                              if p.default is not EMPTY)
        if func is None:
            # Called with arguments only, e.g. @dec(arg); wait for the func.
            return lambda func: decorate(func, caller, extras, kwsyntax)
        else:
            return decorate(func, caller, extras, kwsyntax)
    dec.__signature__ = sig.replace(parameters=dec_params)
    dec.__name__ = caller.__name__
    dec.__doc__ = caller.__doc__
    dec.__wrapped__ = caller
    dec.__qualname__ = caller.__qualname__
    dec.__kwdefaults__ = getattr(caller, '__kwdefaults__', None)
    dec.__dict__.update(caller.__dict__)
    return dec
# ####################### contextmanager ####################### #
class ContextManager(_GeneratorContextManager):
    """A _GeneratorContextManager whose instances can also decorate functions."""
    def __init__(self, g, *a, **k):
        _GeneratorContextManager.__init__(self, g, a, k)
    def __call__(self, func):
        """Decorate *func* so every call runs inside a fresh context manager."""
        def caller(f, *a, **k):
            # Build a new instance from the saved generator and arguments on
            # each call, since a generator context manager is single-use.
            with self.__class__(self.func, *self.args, **self.kwds):
                return f(*a, **k)
        return decorate(func, caller)
# Decorator form of ContextManager, built with this module's own machinery.
_contextmanager = decorator(ContextManager)
def contextmanager(func):
    """Turn a generator function into a ContextManager factory."""
    # Enable Pylint config: contextmanager-decorators=decorator.contextmanager
    return _contextmanager(func)
# ############################ dispatch_on ############################ #
def append(a, vancestors):
    """
    Append ``a`` to the list of the virtual ancestors, unless it is already
    included.

    If an existing entry is a superclass of ``a`` it is replaced by ``a``;
    if an existing entry is a subclass of ``a``, nothing is added.
    """
    should_add = True
    for idx, existing in enumerate(vancestors):
        if issubclass(existing, a):
            # A more specific class is already present; keep it.
            should_add = False
            break
        if issubclass(a, existing):
            # ``a`` is more specific; take the slot of the broader class.
            vancestors[idx] = a
            should_add = False
    if should_add:
        vancestors.append(a)
# inspired from simplegeneric by P.J. Eby and functools.singledispatch
def dispatch_on(*dispatch_args):
    """
    Factory of decorators turning a function into a generic function
    dispatching on the given arguments.
    """
    assert dispatch_args, 'No dispatch args passed'
    dispatch_str = '(%s,)' % ', '.join(dispatch_args)
    def check(arguments, wrong=operator.ne, msg=''):
        """Make sure one passes the expected number of arguments"""
        if wrong(len(arguments), len(dispatch_args)):
            raise TypeError('Expected %d arguments, got %d%s' %
                            (len(dispatch_args), len(arguments), msg))
    def gen_func_dec(func):
        """Decorator turning a function into a generic function"""
        # first check the dispatch arguments
        argset = set(getfullargspec(func).args)
        if not set(dispatch_args) <= argset:
            raise NameError('Unknown dispatch arguments %s' % dispatch_str)
        # Maps a tuple of types -> the registered implementation for it.
        typemap = {}
        def vancestors(*types):
            """
            Get a list of sets of virtual ancestors for the given types
            """
            check(types)
            ras = [[] for _ in range(len(dispatch_args))]
            for types_ in typemap:
                for t, type_, ra in zip(types, types_, ras):
                    # A registered type counts as a "virtual" ancestor when it
                    # is not already in the real MRO of t.
                    if issubclass(t, type_) and type_ not in t.mro():
                        append(type_, ra)
            return [set(ra) for ra in ras]
        def ancestors(*types):
            """
            Get a list of virtual MROs, one for each type
            """
            check(types)
            lists = []
            for t, vas in zip(types, vancestors(*types)):
                n_vas = len(vas)
                if n_vas > 1:
                    raise RuntimeError(
                        'Ambiguous dispatch for %s: %s' % (t, vas))
                elif n_vas == 1:
                    va, = vas
                    # Weave the single virtual ancestor into t's MRO by
                    # building a throwaway subclass of both.
                    mro = type('t', (t, va), {}).mro()[1:]
                else:
                    mro = t.mro()
                lists.append(mro[:-1])  # discard t and object
            return lists
        def register(*types):
            """
            Decorator to register an implementation for the given types
            """
            check(types)
            def dec(f):
                check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__)
                typemap[types] = f
                return f
            return dec
        def dispatch_info(*types):
            """
            An utility to introspect the dispatch algorithm
            """
            check(types)
            lst = []
            for anc in itertools.product(*ancestors(*types)):
                lst.append(tuple(a.__name__ for a in anc))
            return lst
        def _dispatch(dispatch_args, *args, **kw):
            types = tuple(type(arg) for arg in dispatch_args)
            try:  # fast path: exact type match
                f = typemap[types]
            except KeyError:
                pass
            else:
                return f(*args, **kw)
            # Walk the cartesian product of the (virtual) MROs, most specific
            # combination first, looking for a registered implementation.
            combinations = itertools.product(*ancestors(*types))
            next(combinations)  # the first one has been already tried
            for types_ in combinations:
                f = typemap.get(types_)
                if f is not None:
                    return f(*args, **kw)
            # else call the default implementation
            return func(*args, **kw)
        return FunctionMaker.create(
            func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str,
            dict(_f_=_dispatch), register=register, default=func,
            typemap=typemap, vancestors=vancestors, ancestors=ancestors,
            dispatch_info=dispatch_info, __wrapped__=func)
    gen_func_dec.__name__ = 'dispatch_on' + dispatch_str
    return gen_func_dec
|
|
# Copyright (c) 2010, Ryan Bourgeois <bluedragonx@gmail.com>
# All rights reserved.
#
# This software is licensed under a modified BSD license as defined in the
# provided license file at the root of this project. You may modify and/or
# distribute in accordance with those terms.
#
# This software is provided "as is" and any express or implied warranties,
# including, but not limited to, the implied warranties of merchantability and
# fitness for a particular purpose are disclaimed.
from couchdbkit.resource import ResourceNotFound
from whatcouch.test import Config
from whatcouch.adapters import PermissionAdapter
from whatcouch.model import Group, Permission
class TestPermissionAdapterPopulated:
"""
Test the permission adapter against a populated database.
"""
@staticmethod
def setup_class():
"""
Create the permission adapter, add permissions and groups to the
database, and add expected values to the configuration.
"""
Config.adapter = PermissionAdapter(Config.t11)
p1 = Permission(name='p1') # will have multiple groups
p2 = Permission(name='p2') # will have one groups
p3 = Permission(name='p3') # will have no group
Config.perms = [p1, p2, p3]
Permission.bulk_save(Config.perms)
g1 = Group(name='g1') # belongs to p1, p2
g2 = Group(name='g2') # belongs to p1
g3 = Group(name='g3') # belongs to no groups
g1.permissions.append(p1)
g1.permissions.append(p2)
g2.permissions.append(p1)
Config.groups = [g1, g2, g3]
Group.bulk_save(Config.groups)
Config.sections = {
'p1': ['g1', 'g2'],
'p2': ['g1'],
'p3': []}
Config.items = {
'g1': ['p1', 'p2'],
'g2': ['p1'],
'g3': []}
@staticmethod
def teardown_class():
"""
Delete permissions and groups from the database and delete
configured attributes from the config.
"""
for group in Config.groups:
try:
group.delete()
except ResourceNotFound:
pass
for perm in Config.perms:
try:
perm.delete()
except ResourceNotFound:
pass
del Config.groups
del Config.perms
del Config.sections
del Config.items
del Config.adapter
def test_get_group__found(self):
"""
Test PermissionAdapter._get_group() for an existing group.
"""
groupname = 'g1'
group = Config.adapter._get_group(groupname)
assert group is not None
assert isinstance(group, Group)
assert group.name == groupname
def test_get_group__notfound(self):
"""
Test PermissionAdapter._get_group() for a nonexistent group.
"""
groupname = 'g4'
group = Config.adapter._get_group(groupname)
assert group is None
def test_get_perm__found(self):
"""
Test PermissionAdapter._get_perm() for an existing permission.
"""
permname = 'p1'
perm = Config.adapter._get_perm(permname)
assert perm is not None
assert isinstance(perm, Permission)
assert perm.name == permname
def test_get_perm__notfound(self):
"""
Test PermissionAdapter._get_perm() for a nonexistent permission.
"""
permname = 'noperm'
perm = Config.adapter._get_perm(permname)
assert perm is None
def test_get_all_sections(self):
"""
Test PermissionAdapter._get_all_sections().
"""
sections = Config.adapter._get_all_sections()
assert type(sections) == dict
assert len(sections) == len(Config.sections)
for section, items in sections.iteritems():
assert section in Config.sections
eitems = Config.sections[section]
assert type(items) == list
assert len(items) == len(eitems)
for item in items:
assert item in eitems
def _get_section_items(self, section):
"""
Test PermissionAdapter._get_section_items() for the given permission.
"""
items = Config.adapter._get_section_items(section)
assert type(items) == list
if section in Config.sections:
eitems = Config.sections[section]
assert len(items) == len(eitems)
for item in items:
assert item in eitems
else:
assert len(items) == 0
def test_get_section_items__manyfound(self):
"""
Test PermissionAdapter._get_section_items() for a permission with
multiple groups.
"""
self._get_section_items('p1')
def test_get_section_items__onefound(self):
"""
Test PermissionAdapter._get_section_items() for a permission with
one groups.
"""
self._get_section_items('p2')
def test_get_section_items__notfound(self):
"""
Test PermissionAdapter._get_section_items() for a permission
with no groups.
"""
self._get_section_items('p3')
def test_get_section_items__noperm(self):
"""
Test PermissionAdapter._get_section_items() for a nonexistent
permission.
"""
self._get_section_items('noperm')
def _find_sections(self, group):
"""
Test PermissionAdapter._find_sections() for a given group.
:param group: The group to test _find_sections() against.
"""
sections = Config.adapter._find_sections(group)
assert type(sections) == list
if group in Config.items:
esections = Config.items[group]
assert len(sections) == len(esections)
for section in sections:
assert section in esections
else:
assert len(sections) == 0
def test_find_sections__manyfound(self):
"""
Test PermissionAdapter._find_sections() for a permission with
multiple groups.
"""
self._find_sections('g1')
def test_find_sections__onefound(self):
"""
Test PermissionAdapter._find_sections() for a permission with one
group.
"""
self._find_sections('g2')
def test_find_sections__notfound(self):
"""
Test PermissionAdapter._find_sections() for a permission with no
groups.
"""
self._find_sections('g3')
def test_find_sections__nogroup(self):
"""
Test PermissionAdapter._find_sections() for a nonexistent permission.
"""
self._find_sections('nogroup')
def test_item_is_included__true(self):
"""
Test PermissionAdapter._item_is_included() for a permission containing
the group.
"""
assert Config.adapter._item_is_included('p1', 'g1') == True
def test_item_is_included__false(self):
"""
Test PermissionAdapter._item_is_included() for a permission not
containing the group.
"""
assert Config.adapter._item_is_included('p3', 'g1') == False
def test_item_is_included__noperm(self):
"""
Test PermissionAdapter._item_is_included() for a nonexistent permission
and an existing group.
"""
assert Config.adapter._item_is_included('noperm', 'g1') == False
def test_item_is_included__nogroup(self):
"""
Test PermissionAdapter._item_is_included for an existing permission
and a nonexistent group.
"""
assert Config.adapter._item_is_included('p1', 'nogroup') == False
def _include_items(self, section, items):
"""
Test PermissionAdapter._include_items() for a given section and items.
:param section: The permission to test _include_items() against.
:param items: The groups to test _include_items() against.
"""
Config.adapter._include_items(section, items)
groups = [ Config.adapter._get_group(item) for item in items ]
for group in groups:
assert group is not None
found = False
for i in range(len(group.permissions)-1, -1, -1):
if group.permissions[i].name == section:
found = True
del group.permissions[i]
group.save()
assert found
def test_include_items__one(self):
"""
Test PermissionAdapter._include_items() with one group.
"""
self._include_items('p3', ['g1'])
def test_include_items__many(self):
"""
Test PermissionAdapter._include_items() with multiple groups.
"""
self._include_items('p3', ['g1', 'g2'])
def test_include_items__noperm(self):
"""
Test PermissionAdapter._include_items() with a nonexistent permission.
"""
try:
Config.adapter._include_items('noperm', ['g1'])
except:
assert False
def test_include_items__nogroup(self):
"""
Test PermissionAdapter._include_items() with a nonexistent group.
"""
try:
Config.adapter._include_items('p3', ['nogroup'])
except:
assert False
def _exclude_items(self, section, items):
"""
Test PermissionAdapter._exclude_items() against the given permission and groups.
:param section: The permission to test _exclude_items() against.
:param items: The groups to test _exclude_items() against.
"""
Config.adapter._exclude_items(section, items)
perm = Config.adapter._get_perm(section)
groups = [ Config.adapter._get_group(item) for item in items ]
found_any = False
for group in groups:
found = False
for cperm in group.permissions:
if cperm.name == section:
found = True
if not found:
print '%s -> %s' % (group.name, perm.name)
group.permissions.append(perm)
else:
found_any = True
Group.bulk_save(groups)
assert not found
def test_exclude_items__one(self):
"""
Test PermissionAdapter._exclude_items() with one group.
"""
self._exclude_items('p1', ['g1'])
def test_exclude_items__many(self):
"""
Test PermissionAdapter._exclude_items() with multiple groups.
"""
self._exclude_items('p1', ['g1', 'g2'])
def test_exclude_items__noperm(self):
"""
Test PermissionAdapter._exclude_items() with a nonexistent permission.
"""
try:
Config.adapter._exclude_items('noperm', ['g1'])
except:
assert False
def test_exclude_items__nogroup(self):
"""
Test PermissionAdapter._exclude_items() with a nonexistent group.
"""
try:
Config.adapter._exclude_items('p1', ['nogroup'])
except:
assert False
def test_section_exists__true(self):
    """Check that _section_exists() reports an existing permission."""
    exists = Config.adapter._section_exists('p1')
    assert exists
def test_section_exists__false(self):
    """Check that _section_exists() rejects a nonexistent permission."""
    exists = Config.adapter._section_exists('noperm')
    assert not exists
def test_create_section(self):
    """
    Test PermissionAdapter._create_section().

    Creates a fresh permission, checks it can be looked up, then removes
    it again so the fixture data is unchanged.
    """
    name = 'newperm'
    Config.adapter._create_section(name)
    created = Config.adapter._get_perm(name)
    assert created is not None
    created.delete()
def test_edit_section(self):
    """
    Test PermissionAdapter._edit_section().

    Saves a permission under one name, renames it through the adapter and
    checks the new name resolves; cleans up afterwards.
    """
    before, after = 'oldperm', 'newperm'
    Permission(name=before).save()
    Config.adapter._edit_section(before, after)
    renamed = Config.adapter._get_perm(after)
    assert renamed is not None
    assert renamed.name == after
    renamed.delete()
def test_delete_section(self):
    """
    Test PermissionAdapter._delete_section().

    Saves a throwaway permission, deletes it through the adapter and
    checks it can no longer be looked up.
    """
    name = 'delperm'
    Permission(name=name).save()
    Config.adapter._delete_section(name)
    assert Config.adapter._get_perm(name) is None
|
|
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.cache
~~~~~~~~~~~~~~~~~~~~~~
The main problem with dynamic Web sites is, well, they're dynamic. Each
time a user requests a page, the webserver executes a lot of code, queries
the database, renders templates until the visitor gets the page he sees.
This is a lot more expensive than just loading a file from the file system
and sending it to the visitor.
For most Web applications, this overhead isn't a big deal but once it
becomes, you will be glad to have a cache system in place.
How Caching Works
=================
Caching is pretty simple. Basically you have a cache object lurking around
somewhere that is connected to a remote cache or the file system or
something else. When the request comes in you check if the current page
is already in the cache and if so, you're returning it from the cache.
Otherwise you generate the page and put it into the cache. (Or a fragment
of the page, you don't have to cache the full thing)
Here is a simple example of how to cache a sidebar for a template::
def get_sidebar(user):
identifier = 'sidebar_for/user%d' % user.id
value = cache.get(identifier)
if value is not None:
return value
value = generate_sidebar_for(user=user)
cache.set(identifier, value, timeout=60 * 5)
return value
Creating a Cache Object
=======================
To create a cache object you just import the cache system of your choice
from the cache module and instantiate it. Then you can start working
with that object:
>>> from werkzeug.contrib.cache import SimpleCache
>>> c = SimpleCache()
>>> c.set("foo", "value")
>>> c.get("foo")
'value'
>>> c.get("missing") is None
True
Please keep in mind that you have to create the cache and put it somewhere
you have access to it (either as a module global you can import or you just
put it into your WSGI application).
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import re
import errno
import tempfile
from hashlib import md5
from time import time
try:
import cPickle as pickle
except ImportError: # pragma: no cover
import pickle
from werkzeug._compat import iteritems, string_types, text_type, \
integer_types, to_native
from werkzeug.posixemulation import rename
def _items(mappingorseq):
"""Wrapper for efficient iteration over mappings represented by dicts
or sequences::
>>> for k, v in _items((i, i*i) for i in xrange(5)):
... assert k*k == v
>>> for k, v in _items(dict((i, i*i) for i in xrange(5))):
... assert k*k == v
"""
if hasattr(mappingorseq, 'items'):
return iteritems(mappingorseq)
return mappingorseq
class BaseCache(object):
    """Baseclass for the cache systems.  All the cache systems implement this
    API or a superset of it.

    :param default_timeout: the default timeout (in seconds) that is used if no
                            timeout is specified on :meth:`set`. A timeout of 0
                            indicates that the cache never expires.
    """

    def __init__(self, default_timeout=0):
        self.default_timeout = default_timeout

    def get(self, key):
        """Look up key in the cache and return the value for it.

        :param key: the key to be looked up.
        :returns: The value if it exists and is readable, else ``None``.
        """
        return None

    def delete(self, key):
        """Delete `key` from the cache.

        :param key: the key to delete.
        :returns: Whether the key existed and has been deleted.
        :rtype: boolean
        """
        return True

    def get_many(self, *keys):
        """Returns a list of values for the given keys.
        For each key a item in the list is created::

            foo, bar = cache.get_many("foo", "bar")

        Has the same error handling as :meth:`get`.

        :param keys: The function accepts multiple keys as positional
                     arguments.
        """
        # A list comprehension rather than ``map``: on Python 3 ``map``
        # returns a lazy iterator, which breaks callers that index or
        # re-iterate the documented list result.
        return [self.get(key) for key in keys]

    def get_dict(self, *keys):
        """Like :meth:`get_many` but return a dict::

            d = cache.get_dict("foo", "bar")
            foo = d["foo"]
            bar = d["bar"]

        :param keys: The function accepts multiple keys as positional
                     arguments.
        """
        return dict(zip(keys, self.get_many(*keys)))

    def set(self, key, value, timeout=0):
        """Add a new key/value to the cache (overwrites value, if key already
        exists in the cache).

        :param key: the key to set
        :param value: the value for the key
        :param timeout: the cache timeout for the key (if not specified,
                        it uses the default timeout). A timeout of 0 indicates
                        that the cache never expires.
        :returns: ``True`` if key has been updated, ``False`` for backend
                  errors. Pickling errors, however, will raise a subclass of
                  ``pickle.PickleError``.
        :rtype: boolean
        """
        return True

    def add(self, key, value, timeout=0):
        """Works like :meth:`set` but does not overwrite the values of already
        existing keys.

        :param key: the key to set
        :param value: the value for the key
        :param timeout: the cache timeout for the key or the default
                        timeout if not specified. A timeout of 0 indicates
                        that the cache never expires.
        :returns: Same as :meth:`set`, but also ``False`` for already
                  existing keys.
        :rtype: boolean
        """
        return True

    def set_many(self, mapping, timeout=0):
        """Sets multiple keys and values from a mapping.

        :param mapping: a mapping with the keys/values to set.
        :param timeout: the cache timeout for the key (if not specified,
                        it uses the default timeout). A timeout of 0
                        indicates that the cache never expires.
        :returns: Whether all given keys have been set.
        :rtype: boolean
        """
        rv = True
        for key, value in _items(mapping):
            if not self.set(key, value, timeout):
                rv = False
        return rv

    def delete_many(self, *keys):
        """Deletes multiple keys at once.

        :param keys: The function accepts multiple keys as positional
                     arguments.
        :returns: Whether all given keys have been deleted.
        :rtype: boolean
        """
        return all(self.delete(key) for key in keys)

    def has(self, key):
        """Checks if a key exists in the cache without returning it. This is a
        cheap operation that bypasses loading the actual data on the backend.

        This method is optional and may not be implemented on all caches.

        :param key: the key to check
        """
        raise NotImplementedError(
            '%s doesn\'t have an efficient implementation of `has`. That '
            'means it is impossible to check whether a key exists without '
            'fully loading the key\'s data. Consider using `self.get` '
            'explicitly if you don\'t care about performance.'
        )

    def clear(self):
        """Clears the cache.  Keep in mind that not all caches support
        completely clearing the cache.

        :returns: Whether the cache has been cleared.
        :rtype: boolean
        """
        return True

    def inc(self, key, delta=1):
        """Increments the value of a key by `delta`.  If the key does
        not yet exist it is initialized with `delta`.

        For supporting caches this is an atomic operation.

        :param key: the key to increment.
        :param delta: the delta to add.
        :returns: The new value or ``None`` for backend errors.
        """
        value = (self.get(key) or 0) + delta
        return value if self.set(key, value) else None

    def dec(self, key, delta=1):
        """Decrements the value of a key by `delta`.  If the key does
        not yet exist it is initialized with `-delta`.

        For supporting caches this is an atomic operation.

        :param key: the key to increment.
        :param delta: the delta to subtract.
        :returns: The new value or `None` for backend errors.
        """
        value = (self.get(key) or 0) - delta
        return value if self.set(key, value) else None
class NullCache(BaseCache):
    """A no-op cache backend: it inherits the :class:`BaseCache` defaults,
    so every operation reports success but nothing is ever stored.  Useful
    as a drop-in stand-in during unit testing.

    :param default_timeout: a dummy parameter that is ignored but exists
                            for API compatibility with other caches.
    """
class SimpleCache(BaseCache):
    """Simple memory cache for single process environments.  This class exists
    mainly for the development server and is not 100% thread safe.  It tries
    to use as many atomic operations as possible and no locks for simplicity
    but it could happen under heavy load that keys are added multiple times.

    :param threshold: the maximum number of items the cache stores before
                      it starts deleting some.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`. A timeout of
                            0 indicates that the cache never expires.
    """

    def __init__(self, threshold=500, default_timeout=0):
        BaseCache.__init__(self, default_timeout)
        # Maps key -> (expires_at, pickled_value); expires_at == 0 means
        # the entry never expires.
        self._cache = {}
        self.clear = self._cache.clear
        self._threshold = threshold

    def _prune(self):
        # NOTE(review): upstream werkzeug evicts every expired entry plus
        # every third entry once the threshold is exceeded.  The removal has
        # been deliberately disabled here (see the ``#MOD`` comment below),
        # so this method still computes the candidate list but never deletes
        # anything — the cache grows without bound past ``_threshold``.
        if len(self._cache) > self._threshold:
            now = time()
            toremove = []
            for idx, (key, (expires, _)) in enumerate(self._cache.items()):
                if (expires != 0 and expires <= now) or idx % 3 == 0:
                    toremove.append(key)
            for key in toremove:
                #self._cache.pop(key, None)
                #MOD: non-delete cached elements automatically
                pass

    def _get_expiration(self, timeout):
        # Resolve ``None`` to the configured default; positive timeouts are
        # converted to an absolute expiry timestamp, 0 is kept as "never".
        if timeout is None:
            timeout = self.default_timeout
        if timeout > 0:
            timeout = time() + timeout
        return timeout

    def get(self, key):
        try:
            expires, value = self._cache[key]
            # Serve only unexpired entries; expired ones fall through to the
            # implicit ``None`` return (they are not removed here).
            if expires == 0 or expires > time():
                return pickle.loads(value)
        except (KeyError, pickle.PickleError):
            return None

    def set(self, key, value, timeout=0):
        expires = self._get_expiration(timeout)
        self._prune()
        # Values are stored pickled so mutations of the original object do
        # not leak into the cache.
        self._cache[key] = (expires, pickle.dumps(value,
                                                  pickle.HIGHEST_PROTOCOL))
        return True

    def add(self, key, value, timeout=0):
        expires = self._get_expiration(timeout)
        self._prune()
        item = (expires, pickle.dumps(value,
                                      pickle.HIGHEST_PROTOCOL))
        if key in self._cache:
            return False
        # ``setdefault`` keeps the insert atomic with respect to the check
        # above under concurrent use.
        self._cache.setdefault(key, item)
        return True

    def delete(self, key):
        return self._cache.pop(key, None) is not None

    def has(self, key):
        try:
            expires, value = self._cache[key]
            return expires == 0 or expires > time()
        except KeyError:
            return False
# Validity predicate for memcached keys: 1-250 characters, excluding
# control characters/space (\x00-\x21) and \xff — keys outside this range
# are rejected by memcached itself (see the comment in
# MemcachedCache.get below regarding the 250-character limit).
_test_memcached_key = re.compile(r'[^\x00-\x21\xff]{1,250}$').match
class MemcachedCache(BaseCache):
    """A cache that uses memcached as backend.

    The first argument can either be an object that resembles the API of a
    :class:`memcache.Client` or a tuple/list of server addresses. In the
    event that a tuple/list is passed, Werkzeug tries to import the best
    available memcache library.

    This cache looks into the following packages/modules to find bindings for
    memcached:

        - ``pylibmc``
        - ``google.appengine.api.memcached``
        - ``memcached``

    Implementation notes:  This cache backend works around some limitations in
    memcached to simplify the interface.  For example unicode keys are encoded
    to utf-8 on the fly.  Methods such as :meth:`~BaseCache.get_dict` return
    the keys in the same format as passed.  Furthermore all get methods
    silently ignore key errors to not cause problems when untrusted user data
    is passed to the get methods which is often the case in web applications.

    :param servers: a list or tuple of server addresses or alternatively
                    a :class:`memcache.Client` or a compatible client.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`. A timeout of
                            0 indicates that the cache never expires.
    :param key_prefix: a prefix that is added before all keys.  This makes it
                       possible to use the same memcached server for different
                       applications.  Keep in mind that
                       :meth:`~BaseCache.clear` will also clear keys with a
                       different prefix.
    """

    def __init__(self, servers=None, default_timeout=0, key_prefix=None):
        BaseCache.__init__(self, default_timeout)
        if servers is None or isinstance(servers, (list, tuple)):
            if servers is None:
                servers = ['127.0.0.1:11211']
            self._client = self.import_preferred_memcache_lib(servers)
            if self._client is None:
                raise RuntimeError('no memcache module found')
        else:
            # NOTE: servers is actually an already initialized memcache
            # client.
            self._client = servers

        self.key_prefix = to_native(key_prefix)

    def _normalize_key(self, key):
        # Memcached keys must be native strings; prepend the configured
        # prefix (if any) so applications sharing a server do not collide.
        key = to_native(key, 'utf-8')
        if self.key_prefix:
            key = self.key_prefix + key
        return key

    def _normalize_timeout(self, timeout):
        # ``None`` falls back to the default; positive timeouts become an
        # absolute UNIX timestamp, 0 is passed through as "never expire".
        if timeout is None:
            timeout = self.default_timeout
        if timeout > 0:
            timeout = int(time()) + timeout
        return timeout

    def get(self, key):
        key = self._normalize_key(key)
        # memcached doesn't support keys longer than that.  Because often
        # checks for so long keys can occur because it's tested from user
        # submitted data etc we fail silently for getting.
        if _test_memcached_key(key):
            return self._client.get(key)

    def get_dict(self, *keys):
        key_mapping = {}
        have_encoded_keys = False
        for key in keys:
            encoded_key = self._normalize_key(key)
            if not isinstance(key, str):
                have_encoded_keys = True
            if _test_memcached_key(key):
                key_mapping[encoded_key] = key
        # ``d`` keeps the raw backend result; ``rv`` is re-keyed below so
        # callers get back exactly the key objects they passed in.
        d = rv = self._client.get_multi(key_mapping.keys())
        if have_encoded_keys or self.key_prefix:
            rv = {}
            for key, value in iteritems(d):
                rv[key_mapping[key]] = value
        if len(rv) < len(keys):
            # Fill in ``None`` for keys the backend did not return (missing
            # or invalid keys) so the result always covers every request.
            for key in keys:
                if key not in rv:
                    rv[key] = None
        return rv

    def add(self, key, value, timeout=0):
        key = self._normalize_key(key)
        timeout = self._normalize_timeout(timeout)
        return self._client.add(key, value, timeout)

    def set(self, key, value, timeout=0):
        key = self._normalize_key(key)
        timeout = self._normalize_timeout(timeout)
        return self._client.set(key, value, timeout)

    def get_many(self, *keys):
        d = self.get_dict(*keys)
        return [d[key] for key in keys]

    def set_many(self, mapping, timeout=0):
        new_mapping = {}
        for key, value in _items(mapping):
            key = self._normalize_key(key)
            new_mapping[key] = value

        timeout = self._normalize_timeout(timeout)
        # ``set_multi`` returns the list of keys that failed to be stored.
        failed_keys = self._client.set_multi(new_mapping, timeout)
        return not failed_keys

    def delete(self, key):
        key = self._normalize_key(key)
        if _test_memcached_key(key):
            return self._client.delete(key)

    def delete_many(self, *keys):
        new_keys = []
        for key in keys:
            key = self._normalize_key(key)
            if _test_memcached_key(key):
                new_keys.append(key)
        return self._client.delete_multi(new_keys)

    def has(self, key):
        key = self._normalize_key(key)
        if _test_memcached_key(key):
            # ``append`` of an empty string succeeds only if the key already
            # exists, so it doubles as a cheap existence check that never
            # transfers the stored value.
            return self._client.append(key, '')
        return False

    def clear(self):
        return self._client.flush_all()

    def inc(self, key, delta=1):
        key = self._normalize_key(key)
        return self._client.incr(key, delta)

    def dec(self, key, delta=1):
        key = self._normalize_key(key)
        return self._client.decr(key, delta)

    def import_preferred_memcache_lib(self, servers):
        """Returns an initialized memcache client.  Used by the constructor."""
        # Preference order: pylibmc, App Engine's memcache, plain memcache.
        try:
            import pylibmc
        except ImportError:
            pass
        else:
            return pylibmc.Client(servers)

        try:
            from google.appengine.api import memcache
        except ImportError:
            pass
        else:
            return memcache.Client()

        try:
            import memcache
        except ImportError:
            pass
        else:
            return memcache.Client(servers)
# backwards compatibility
GAEMemcachedCache = MemcachedCache
class RedisCache(BaseCache):
    """Uses the Redis key-value store as a cache backend.

    The first argument can be either a string denoting address of the Redis
    server or an object resembling an instance of a redis.Redis class.

    Note: Python Redis API already takes care of encoding unicode strings on
    the fly.

    .. versionadded:: 0.7

    .. versionadded:: 0.8
       `key_prefix` was added.

    .. versionchanged:: 0.8
       This cache backend now properly serializes objects.

    .. versionchanged:: 0.8.3
       This cache backend now supports password authentication.

    .. versionchanged:: 0.10
        ``**kwargs`` is now passed to the redis object.

    :param host: address of the Redis server or an object which API is
                 compatible with the official Python Redis client (redis-py).
    :param port: port number on which Redis server listens for connections.
    :param password: password authentication for the Redis server.
    :param db: db (zero-based numeric index) on Redis Server to connect.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`. A timeout of
                            0 indicates that the cache never expires.
    :param key_prefix: A prefix that should be added to all keys.

    Any additional keyword arguments will be passed to ``redis.Redis``.
    """

    def __init__(self, host='localhost', port=6379, password=None,
                 db=0, default_timeout=0, key_prefix=None, **kwargs):
        BaseCache.__init__(self, default_timeout)
        if isinstance(host, string_types):
            try:
                import redis
            except ImportError:
                raise RuntimeError('no redis module found')
            if kwargs.get('decode_responses', None):
                # Decoding would corrupt the pickled payloads stored below.
                raise ValueError('decode_responses is not supported by '
                                 'RedisCache.')
            self._client = redis.Redis(host=host, port=port, password=password,
                                       db=db, **kwargs)
        else:
            # ``host`` is assumed to be an already-constructed client object.
            self._client = host
        self.key_prefix = key_prefix or ''

    def _get_expiration(self, timeout):
        # ``None`` falls back to the default; 0 ("never expire") is mapped
        # to the sentinel -1, which the write methods translate into a
        # plain SET without an expiry.
        if timeout is None:
            timeout = self.default_timeout
        if timeout == 0:
            timeout = -1
        return timeout

    def dump_object(self, value):
        """Dumps an object into a string for redis.  By default it serializes
        integers as regular string and pickle dumps everything else.
        """
        t = type(value)
        if t in integer_types:
            return str(value).encode('ascii')
        # The ``!`` prefix marks a pickled payload for load_object().
        return b'!' + pickle.dumps(value)

    def load_object(self, value):
        """The reversal of :meth:`dump_object`.  This might be called with
        None.
        """
        if value is None:
            return None
        if value.startswith(b'!'):
            try:
                return pickle.loads(value[1:])
            except pickle.PickleError:
                return None
        try:
            return int(value)
        except ValueError:
            # before 0.8 we did not have serialization.  Still support that.
            return value

    def get(self, key):
        return self.load_object(self._client.get(self.key_prefix + key))

    def get_many(self, *keys):
        if self.key_prefix:
            keys = [self.key_prefix + key for key in keys]
        return [self.load_object(x) for x in self._client.mget(keys)]

    def set(self, key, value, timeout=0):
        timeout = self._get_expiration(timeout)
        dump = self.dump_object(value)
        if timeout == -1:
            # No expiry requested: plain SET.
            result = self._client.set(name=self.key_prefix + key,
                                      value=dump)
        else:
            result = self._client.setex(name=self.key_prefix + key,
                                        value=dump, time=timeout)
        return result

    def add(self, key, value, timeout=0):
        timeout = self._get_expiration(timeout)
        dump = self.dump_object(value)
        # SETNX only writes when the key is absent; EXPIRE then attaches
        # the timeout (note: -1 is passed through here as well).
        return (
            self._client.setnx(name=self.key_prefix + key, value=dump) and
            self._client.expire(name=self.key_prefix + key, time=timeout)
        )

    def set_many(self, mapping, timeout=0):
        timeout = self._get_expiration(timeout)
        # Use transaction=False to batch without calling redis MULTI
        # which is not supported by twemproxy
        pipe = self._client.pipeline(transaction=False)

        for key, value in _items(mapping):
            dump = self.dump_object(value)
            if timeout == -1:
                pipe.set(name=self.key_prefix + key, value=dump)
            else:
                pipe.setex(name=self.key_prefix + key, value=dump,
                           time=timeout)
        return pipe.execute()

    def delete(self, key):
        return self._client.delete(self.key_prefix + key)

    def delete_many(self, *keys):
        if not keys:
            return
        if self.key_prefix:
            keys = [self.key_prefix + key for key in keys]
        return self._client.delete(*keys)

    def has(self, key):
        return self._client.exists(self.key_prefix + key)

    def clear(self):
        status = False
        if self.key_prefix:
            # Only remove our own keys, not the whole database.
            keys = self._client.keys(self.key_prefix + '*')
            if keys:
                status = self._client.delete(*keys)
        else:
            status = self._client.flushdb()
        return status

    def inc(self, key, delta=1):
        return self._client.incr(name=self.key_prefix + key, amount=delta)

    def dec(self, key, delta=1):
        return self._client.decr(name=self.key_prefix + key, amount=delta)
class FileSystemCache(BaseCache):
    """A cache that stores the items on the file system.  This cache depends
    on being the only user of the `cache_dir`.  Make absolutely sure that
    nobody but this cache stores files there or otherwise the cache will
    randomly delete files therein.

    :param cache_dir: the directory where cache files are stored.
    :param threshold: the maximum number of items the cache stores before
                      it starts deleting some.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`. A timeout of
                            0 indicates that the cache never expires.
    :param mode: the file mode wanted for the cache files, default 0600
    """

    #: used for temporary files by the FileSystemCache
    _fs_transaction_suffix = '.__wz_cache'

    def __init__(self, cache_dir, threshold=500, default_timeout=0,
                 mode=0o600):
        BaseCache.__init__(self, default_timeout)
        self._path = cache_dir
        self._threshold = threshold
        self._mode = mode
        try:
            os.makedirs(self._path)
        except OSError as ex:
            # A pre-existing cache directory is fine; anything else is real.
            if ex.errno != errno.EEXIST:
                raise

    def _list_dir(self):
        """Return a list of (fully qualified) cache filenames, excluding
        in-flight temporary files."""
        return [os.path.join(self._path, fn) for fn in os.listdir(self._path)
                if not fn.endswith(self._fs_transaction_suffix)]

    def _prune(self):
        # Once the threshold is exceeded, drop every expired entry plus
        # every third entry (cheap, arbitrary thinning).  Best-effort: a
        # concurrently removed file aborts pruning silently.
        entries = self._list_dir()
        if len(entries) > self._threshold:
            now = time()
            try:
                for idx, fname in enumerate(entries):
                    remove = False
                    with open(fname, 'rb') as f:
                        expires = pickle.load(f)
                    remove = (expires != 0 and expires <= now) or idx % 3 == 0

                    if remove:
                        os.remove(fname)
            except (IOError, OSError):
                pass

    def clear(self):
        for fname in self._list_dir():
            try:
                os.remove(fname)
            except (IOError, OSError):
                return False
        return True

    def _get_filename(self, key):
        # Keys are hashed so arbitrary key strings map to safe filenames.
        if isinstance(key, text_type):
            key = key.encode('utf-8')  # XXX unicode review
        hash = md5(key).hexdigest()
        return os.path.join(self._path, hash)

    def get(self, key):
        filename = self._get_filename(key)
        try:
            with open(filename, 'rb') as f:
                # The file stores two pickles back to back: the expiry
                # timestamp first, then the value.
                pickle_time = pickle.load(f)
                if pickle_time == 0 or pickle_time >= time():
                    return pickle.load(f)
                else:
                    os.remove(filename)
                    return None
        except (IOError, OSError, pickle.PickleError):
            return None

    def add(self, key, value, timeout=0):
        filename = self._get_filename(key)
        if not os.path.exists(filename):
            return self.set(key, value, timeout)
        return False

    def set(self, key, value, timeout=0):
        # BUGFIX: resolve ``None`` to the configured default *before*
        # converting to an absolute timestamp.  Previously ``timeout=None``
        # with the default ``default_timeout=0`` stored ``int(time() + 0)``,
        # i.e. an already-expired entry, instead of the documented
        # "0 = never expires" behavior used by every other backend.
        if timeout is None:
            timeout = self.default_timeout
        if timeout != 0:
            timeout = int(time() + timeout)
        filename = self._get_filename(key)
        self._prune()
        try:
            # Write to a temp file in the same directory, then rename it
            # into place so readers never observe a half-written entry.
            fd, tmp = tempfile.mkstemp(suffix=self._fs_transaction_suffix,
                                       dir=self._path)
            with os.fdopen(fd, 'wb') as f:
                pickle.dump(timeout, f, 1)
                pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
            rename(tmp, filename)
            os.chmod(filename, self._mode)
        except (IOError, OSError):
            return False
        else:
            return True

    def delete(self, key):
        try:
            os.remove(self._get_filename(key))
        except (IOError, OSError):
            return False
        else:
            return True

    def has(self, key):
        filename = self._get_filename(key)
        try:
            with open(filename, 'rb') as f:
                pickle_time = pickle.load(f)
                if pickle_time == 0 or pickle_time >= time():
                    return True
                else:
                    os.remove(filename)
                    return False
        except (IOError, OSError, pickle.PickleError):
            return False
|
|
"""Test kytos.core.switch module."""
import asyncio
import json
from datetime import datetime
from unittest import TestCase
from unittest.mock import MagicMock, Mock, patch
from kytos.core import Controller
from kytos.core.config import KytosConfig
from kytos.core.constants import FLOOD_TIMEOUT
from kytos.core.interface import Interface
from kytos.core.switch import Switch
def get_date():
    """Return a fixed datetime whose microsecond field is FLOOD_TIMEOUT+1."""
    microseconds = FLOOD_TIMEOUT + 1
    return datetime(2000, 1, 1, 0, 0, 0, microseconds)
# pylint: disable=protected-access, too-many-public-methods
class TestSwitch(TestCase):
    """Switch tests."""

    def setUp(self):
        """Instantiate a controller."""
        # A fresh event loop per test; the global loop is cleared so tests
        # cannot accidentally rely on it.
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(None)

        self.options = KytosConfig().options['daemon']
        self.controller = Controller(self.options, loop=self.loop)
        self.controller.log = Mock()

        self.switch = self.create_switch()

    @staticmethod
    def create_switch():
        """Create a new switch."""
        # The connection is fully mocked; 0x04 selects OpenFlow 1.3.
        connection = MagicMock()
        connection.address = 'addr'
        connection.port = 'port'
        connection.protocol.version = 0x04
        switch = Switch('00:00:00:00:00:00:00:01', connection)
        switch._enabled = True
        return switch

    def test_repr(self):
        """Test repr() output."""
        expected_repr = "Switch('00:00:00:00:00:00:00:01')"
        self.assertEqual(repr(self.switch), expected_repr)

    def test_id(self):
        """Test id property."""
        self.assertEqual(self.switch.id, '00:00:00:00:00:00:00:01')

    def test_ofp_version(self):
        """Test ofp_version property."""
        self.assertEqual(self.switch.ofp_version, '0x04')

    def test_ofp_version__none(self):
        """Test ofp_version property when connection is none."""
        self.switch.connection = None
        self.assertIsNone(self.switch.ofp_version)

    def tearDown(self):
        """TearDown."""
        self.loop.close()

    def test_switch_vlan_pool_default(self):
        """Test default vlan_pool value."""
        self.assertEqual(self.options.vlan_pool, {})

    def test_switch_vlan_pool_options(self):
        """Test switch with the example from kytos.conf."""
        dpid = "00:00:00:00:00:00:00:01"
        vlan_pool = {"00:00:00:00:00:00:00:01":
                     {"1": [[1, 2], [5, 10]], "4": [[3, 4]]}}
        self.controller.switches[dpid] = self.switch
        self.options.vlan_pool = vlan_pool
        self.controller.get_switch_or_create(dpid, self.switch.connection)

        # Port 1: ranges [1, 2) and [5, 10) from the pool above.
        port_id = 1
        intf = self.controller.switches[dpid].interfaces[port_id]
        tag_values = [tag.value for tag in intf.available_tags]
        self.assertEqual(tag_values, [1, 5, 6, 7, 8, 9])

        # Port 4: range [3, 4).
        port_id = 4
        intf = self.controller.switches[dpid].interfaces[port_id]
        tag_values = [tag.value for tag in intf.available_tags]
        self.assertEqual(tag_values, [3])

        # this port number doesn't exist yet.
        port_7 = 7
        intf = Interface("test", port_7, self.switch)
        # no attr filters, so should associate as it is
        self.controller.switches[dpid].update_interface(intf)
        intf_obj = self.controller.switches[dpid].interfaces[port_7]
        self.assertEqual(intf_obj, intf)
        # assert default vlan_pool range (1, 4096)
        tag_values = [tag.value for tag in intf_obj.available_tags]
        self.assertEqual(tag_values, list(range(1, 4096)))

    def test_update_description(self):
        """Test update_description method."""
        desc = MagicMock()
        desc.mfr_desc.value = 'mfr_desc'
        desc.hw_desc.value = 'hw_desc'
        desc.sw_desc.value = 'sw_desc'
        desc.serial_num.value = 'serial_num'
        desc.dp_desc.value = 'dp_desc'
        self.switch.update_description(desc)
        self.assertEqual(self.switch.description['manufacturer'], 'mfr_desc')
        self.assertEqual(self.switch.description['hardware'], 'hw_desc')
        self.assertEqual(self.switch.description['software'], 'sw_desc')
        self.assertEqual(self.switch.description['serial'], 'serial_num')
        self.assertEqual(self.switch.description['data_path'], 'dp_desc')

    def test_disable(self):
        """Test disable method."""
        # Disabling the switch must cascade to its interfaces.
        interface = MagicMock()
        self.switch.interfaces = {"1": interface}
        self.switch.disable()
        interface.disable.assert_called()
        self.assertFalse(self.switch._enabled)

    def test_disconnect(self):
        """Test disconnect method."""
        self.switch.disconnect()
        self.assertIsNone(self.switch.connection)

    def test_get_interface_by_port_no(self):
        """Test get_interface_by_port_no method."""
        interface_1 = MagicMock(port_number='1')
        interface_2 = MagicMock(port_number='2')
        self.switch.interfaces = {'1': interface_1, '2': interface_2}

        # Known port returns the interface; unknown port returns None.
        expected_interface_1 = self.switch.get_interface_by_port_no('1')
        expected_interface_2 = self.switch.get_interface_by_port_no('3')

        self.assertEqual(expected_interface_1, interface_1)
        self.assertIsNone(expected_interface_2)

    def test_get_flow_by_id(self):
        """Test get_flow_by_id method."""
        flow_1 = MagicMock(id='1')
        flow_2 = MagicMock(id='2')
        self.switch.flows = [flow_1, flow_2]

        # Known flow id returns the flow; unknown id returns None.
        expected_flow_1 = self.switch.get_flow_by_id('1')
        expected_flow_2 = self.switch.get_flow_by_id('3')

        self.assertEqual(expected_flow_1, flow_1)
        self.assertIsNone(expected_flow_2)

    def test_is_connected__true(self):
        """Test is_connected method."""
        connection = MagicMock()
        connection.is_alive.return_value = True
        connection.is_established.return_value = True
        self.switch.connection = connection
        self.switch.is_active = MagicMock()
        self.switch.is_active.return_value = True
        self.assertTrue(self.switch.is_connected())

    def test_is_connected__not_connection(self):
        """Test is_connected method when connection does not exist."""
        self.switch.connection = None
        self.switch.is_active = MagicMock()
        self.switch.is_active.return_value = True
        self.assertFalse(self.switch.is_connected())

    def test_is_connected__not_alive(self):
        """Test is_connected method when switch is not active."""
        # Connection itself is healthy, but the switch is inactive.
        connection = MagicMock()
        connection.is_alive.return_value = True
        connection.is_established.return_value = True
        self.switch.connection = connection
        self.switch.is_active = MagicMock()
        self.switch.is_active.return_value = False
        self.assertFalse(self.switch.is_connected())

    def test_update_connection(self):
        """Test update_connection method."""
        connection = MagicMock()
        self.switch.update_connection(connection)

        self.assertEqual(self.switch.connection, connection)
        # The connection must be back-linked to the switch.
        self.assertEqual(self.switch.connection.switch, self.switch)

    def test_update_features(self):
        """Test update_features method."""
        self.switch.update_features('features')
        self.assertEqual(self.switch.features, 'features')

    def test_send(self):
        """Test send method."""
        self.switch.send('buffer')
        self.switch.connection.send.assert_called_with('buffer')

    @patch('kytos.core.switch.now', return_value=get_date())
    def test_update_lastseen(self, mock_now):
        """Test update_lastseen method."""
        self.switch.update_lastseen()
        self.assertEqual(self.switch.lastseen, mock_now.return_value)

    def test_update_interface(self):
        """Test update_interface method."""
        interface = MagicMock(port_number=1)
        self.switch.update_interface(interface)
        self.assertEqual(self.switch.interfaces[1], interface)

    def test_remove_interface(self):
        """Test remove_interface method."""
        interface = MagicMock(port_number=1)
        self.switch.interfaces[1] = interface

        self.switch.remove_interface(interface)

        self.assertEqual(self.switch.interfaces, {})

    def test_update_mac_table(self):
        """Test update_mac_table method."""
        # Two updates for the same MAC must accumulate both ports.
        mac = MagicMock(value='00:00:00:00:00:00')
        self.switch.update_mac_table(mac, 1)
        self.switch.update_mac_table(mac, 2)

        self.assertEqual(self.switch.mac2port[mac.value], {1, 2})

    def test_last_flood(self):
        """Test last_flood method."""
        self.switch.flood_table['hash'] = 'timestamp'
        ethernet_frame = MagicMock()
        ethernet_frame.get_hash.return_value = 'hash'

        last_flood = self.switch.last_flood(ethernet_frame)

        self.assertEqual(last_flood, 'timestamp')

    def test_last_flood__error(self):
        """Test last_flood method to error case."""
        # Frame hash absent from the flood table -> None.
        ethernet_frame = MagicMock()
        ethernet_frame.get_hash.return_value = 'hash'

        last_flood = self.switch.last_flood(ethernet_frame)

        self.assertIsNone(last_flood)

    @patch('kytos.core.switch.now', return_value=get_date())
    def test_should_flood(self, _):
        """Test should_flood method."""
        # hash1 is older than FLOOD_TIMEOUT (flood allowed); hash2 is
        # exactly at the timeout boundary (flood suppressed).
        self.switch.flood_table['hash1'] = datetime(2000, 1, 1, 0, 0, 0, 0)
        self.switch.flood_table['hash2'] = datetime(2000, 1, 1, 0, 0, 0,
                                                    FLOOD_TIMEOUT)

        ethernet_frame = MagicMock()
        ethernet_frame.get_hash.side_effect = ['hash1', 'hash2']

        should_flood_1 = self.switch.should_flood(ethernet_frame)
        should_flood_2 = self.switch.should_flood(ethernet_frame)

        self.assertTrue(should_flood_1)
        self.assertFalse(should_flood_2)

    @patch('kytos.core.switch.now', return_value=get_date())
    def test_update_flood_table(self, mock_now):
        """Test update_flood_table method."""
        ethernet_frame = MagicMock()
        ethernet_frame.get_hash.return_value = 'hash'

        self.switch.update_flood_table(ethernet_frame)

        self.assertEqual(self.switch.flood_table['hash'],
                         mock_now.return_value)

    def test_where_is_mac(self):
        """Test where_is_mac method."""
        mac = MagicMock(value='00:00:00:00:00:00')

        # Unknown MAC -> None; known MAC -> sorted list of ports.
        expected_ports_1 = self.switch.where_is_mac(mac)

        self.switch.mac2port['00:00:00:00:00:00'] = set([1, 2, 3])
        expected_ports_2 = self.switch.where_is_mac(mac)

        self.assertIsNone(expected_ports_1)
        self.assertEqual(expected_ports_2, [1, 2, 3])

    def test_as_dict(self):
        """Test as_dict method."""
        expected_dict = {'id': '00:00:00:00:00:00:00:01',
                         'name': '00:00:00:00:00:00:00:01',
                         'dpid': '00:00:00:00:00:00:00:01',
                         'connection': 'addr:port',
                         'ofp_version': '0x04',
                         'type': 'switch',
                         'manufacturer': '',
                         'serial': '',
                         'hardware': '',
                         'software': None,
                         'data_path': '',
                         'interfaces': {},
                         'metadata': {},
                         'active': True,
                         'enabled': True}
        self.assertEqual(self.switch.as_dict(), expected_dict)

    def test_as_json(self):
        """Test as_json method."""
        expected_json = json.dumps({'id': '00:00:00:00:00:00:00:01',
                                    'name': '00:00:00:00:00:00:00:01',
                                    'dpid': '00:00:00:00:00:00:00:01',
                                    'connection': 'addr:port',
                                    'ofp_version': '0x04',
                                    'type': 'switch',
                                    'manufacturer': '',
                                    'serial': '',
                                    'hardware': '',
                                    'software': None,
                                    'data_path': '',
                                    'interfaces': {},
                                    'metadata': {},
                                    'active': True,
                                    'enabled': True})
        self.assertEqual(self.switch.as_json(), expected_json)
|
|
# Copyright 2015, Cisco Systems.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test class for UcsPower module."""
import mock
from oslo_config import cfg
from oslo_utils import importutils
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers.modules.ucs import helper as ucs_helper
from ironic.drivers.modules.ucs import power as ucs_power
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.objects import utils as obj_utils
# UcsSdk is an optional dependency; ucs_error is None when it is absent.
ucs_error = importutils.try_import('UcsSdk.utils.exception')
# Canned Cisco UCS driver_info payload used to build the test node.
INFO_DICT = db_utils.get_test_ucs_info()
# Global oslo.config object; per-test overrides happen in setUp().
CONF = cfg.CONF
class UcsPowerTestCase(db_base.DbTestCase):
    """Unit tests for the Cisco UCS power interface (ucs_power.Power)."""

    def setUp(self):
        super(UcsPowerTestCase, self).setUp()
        driver_info = INFO_DICT
        mgr_utils.mock_the_extension_manager(driver="fake_ucs")
        self.node = obj_utils.create_test_node(self.context,
                                               driver='fake_ucs',
                                               driver_info=driver_info)
        # Short retry count and zero interval so the _wait_for_state_change
        # tests finish quickly instead of sleeping between polls.
        CONF.set_override('max_retry', 2, 'cisco_ucs')
        CONF.set_override('action_interval', 0, 'cisco_ucs')
        self.interface = ucs_power.Power()

    def test_get_properties(self):
        """The driver must advertise the common UCS driver properties."""
        # Take a copy: the previous code aliased the module-level
        # COMMON_PROPERTIES dict and mutated it via update(), which could
        # leak state into any other test reading the same dict.
        expected = dict(ucs_helper.COMMON_PROPERTIES)
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.assertEqual(expected, task.driver.get_properties())

    @mock.patch.object(ucs_helper, 'parse_driver_info',
                       spec_set=True, autospec=True)
    def test_validate(self, mock_parse_driver_info):
        """validate() must delegate driver_info checking to the helper."""
        mock_parse_driver_info.return_value = {}
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.interface.validate(task)
            mock_parse_driver_info.assert_called_once_with(task.node)

    @mock.patch.object(ucs_helper, 'parse_driver_info',
                       spec_set=True, autospec=True)
    def test_validate_fail(self, mock_parse_driver_info):
        """validate() must propagate helper validation errors."""
        # Assigning an exception instance directly makes the mock raise it;
        # no need to wrap a single value in iter([...]).
        mock_parse_driver_info.side_effect = (
            exception.InvalidParameterValue('Invalid Input'))
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.assertRaises(exception.InvalidParameterValue,
                              self.interface.validate,
                              task)
            mock_parse_driver_info.assert_called_once_with(task.node)

    @mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
                spec_set=True, autospec=True)
    @mock.patch('ironic.drivers.modules.ucs.power.ucs_power.UcsPower',
                spec_set=True, autospec=True)
    def test_get_power_state_up(self, mock_power_helper, mock_helper):
        """A UCS 'up' state must map to ironic POWER_ON."""
        mock_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
        mock_power = mock_power_helper.return_value
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            mock_power.get_power_state.return_value = 'up'
            self.assertEqual(states.POWER_ON,
                             self.interface.get_power_state(task))
            mock_power.get_power_state.assert_called_once_with()
        mock_power.get_power_state.reset_mock()

    @mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
                spec_set=True, autospec=True)
    @mock.patch('ironic.drivers.modules.ucs.power.ucs_power.UcsPower',
                spec_set=True, autospec=True)
    def test_get_power_state_down(self, mock_power_helper, mock_helper):
        """A UCS 'down' state must map to ironic POWER_OFF."""
        mock_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
        mock_power = mock_power_helper.return_value
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            mock_power.get_power_state.return_value = 'down'
            self.assertEqual(states.POWER_OFF,
                             self.interface.get_power_state(task))
            mock_power.get_power_state.assert_called_once_with()
        mock_power.get_power_state.reset_mock()

    @mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
                spec_set=True, autospec=True)
    @mock.patch('ironic.drivers.modules.ucs.power.ucs_power.UcsPower',
                spec_set=True, autospec=True)
    def test_get_power_state_error(self, mock_power_helper, mock_helper):
        """An unrecognized UCS state must be reported as ERROR."""
        mock_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
        mock_power = mock_power_helper.return_value
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            mock_power.get_power_state.return_value = states.ERROR
            self.assertEqual(states.ERROR,
                             self.interface.get_power_state(task))
            mock_power.get_power_state.assert_called_once_with()

    @mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
                spec_set=True, autospec=True)
    @mock.patch('ironic.drivers.modules.ucs.power.ucs_power.UcsPower',
                spec_set=True, autospec=True)
    def test_get_power_state_fail(self,
                                  mock_ucs_power,
                                  mock_helper):
        """UcsSdk operation errors must surface as UcsOperationError."""
        mock_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
        power = mock_ucs_power.return_value
        power.get_power_state.side_effect = (
            ucs_error.UcsOperationError(operation='getting power state',
                                        error='failed'))
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.assertRaises(exception.UcsOperationError,
                              self.interface.get_power_state,
                              task)
        power.get_power_state.assert_called_with()

    @mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
                spec_set=True, autospec=True)
    @mock.patch('ironic.drivers.modules.ucs.power._wait_for_state_change',
                spec_set=True, autospec=True)
    @mock.patch('ironic.drivers.modules.ucs.power.ucs_power.UcsPower',
                spec_set=True, autospec=True)
    def test_set_power_state(self, mock_power_helper, mock__wait, mock_helper):
        """set_power_state succeeds when the wait loop reaches the target."""
        target_state = states.POWER_ON
        mock_power = mock_power_helper.return_value
        mock_power.get_power_state.side_effect = ['down', 'up']
        mock_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
        mock__wait.return_value = target_state
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertIsNone(self.interface.set_power_state(task,
                                                             target_state))
        mock_power.set_power_state.assert_called_once_with('up')
        mock_power.get_power_state.assert_called_once_with()
        mock__wait.assert_called_once_with(target_state, mock_power)

    @mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
                spec_set=True, autospec=True)
    @mock.patch('ironic.drivers.modules.ucs.power.ucs_power.UcsPower',
                spec_set=True, autospec=True)
    def test_set_power_state_fail(self, mock_power_helper, mock_helper):
        """UcsSdk errors while powering off surface as UcsOperationError."""
        mock_power = mock_power_helper.return_value
        mock_power.set_power_state.side_effect = (
            ucs_error.UcsOperationError(operation='setting power state',
                                        error='failed'))
        mock_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertRaises(exception.UcsOperationError,
                              self.interface.set_power_state,
                              task, states.POWER_OFF)
        mock_power.set_power_state.assert_called_once_with('down')

    @mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
                spec_set=True, autospec=True)
    def test_set_power_state_invalid_state(self, mock_helper):
        """Requesting an unsupported state raises InvalidParameterValue."""
        mock_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertRaises(exception.InvalidParameterValue,
                              self.interface.set_power_state,
                              task, states.ERROR)

    @mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
                spec_set=True, autospec=True)
    @mock.patch('ironic.drivers.modules.ucs.power.ucs_power.UcsPower',
                spec_set=True, autospec=True)
    def test__wait_for_state_change_already_target_state(
            self,
            mock_ucs_power,
            mock_helper):
        """The wait loop returns immediately when already at the target."""
        mock_power = mock_ucs_power.return_value
        target_state = states.POWER_ON
        mock_power.get_power_state.return_value = 'up'
        mock_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
        self.assertEqual(states.POWER_ON,
                         ucs_power._wait_for_state_change(
                             target_state, mock_power))
        mock_power.get_power_state.assert_called_with()

    @mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
                spec_set=True, autospec=True)
    @mock.patch('ironic.drivers.modules.ucs.power.ucs_power.UcsPower',
                spec_set=True, autospec=True)
    def test__wait_for_state_change_exceed_iterations(
            self,
            mock_power_helper,
            mock_helper):
        """Exhausting max_retry polls must yield ERROR."""
        mock_power = mock_power_helper.return_value
        target_state = states.POWER_ON
        mock_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
        mock_power.get_power_state.side_effect = (
            ['down', 'down', 'down', 'down'])
        self.assertEqual(states.ERROR,
                         ucs_power._wait_for_state_change(
                             target_state, mock_power)
                         )
        mock_power.get_power_state.assert_called_with()
        # max_retry=2 -> the state is polled four times before giving up.
        self.assertEqual(4, mock_power.get_power_state.call_count)

    @mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
                spec_set=True, autospec=True)
    @mock.patch('ironic.drivers.modules.ucs.power._wait_for_state_change',
                spec_set=True, autospec=True)
    @mock.patch('ironic.drivers.modules.ucs.power.ucs_power.UcsPower',
                spec_set=True, autospec=True)
    def test_set_and_wait_for_state_change_fail(
            self,
            mock_power_helper,
            mock__wait,
            mock_helper):
        """A wait loop ending in the wrong state raises PowerStateFailure."""
        target_state = states.POWER_ON
        mock_power = mock_power_helper.return_value
        mock_power.get_power_state.return_value = 'down'
        mock_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
        mock__wait.return_value = states.POWER_OFF
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertRaises(exception.PowerStateFailure,
                              self.interface.set_power_state,
                              task,
                              target_state)
        mock_power.set_power_state.assert_called_once_with('up')
        mock_power.get_power_state.assert_called_once_with()
        mock__wait.assert_called_once_with(target_state, mock_power)

    @mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
                spec_set=True, autospec=True)
    @mock.patch('ironic.drivers.modules.ucs.power._wait_for_state_change',
                spec_set=True, autospec=True)
    @mock.patch('ironic.drivers.modules.ucs.power.ucs_power.UcsPower',
                spec_set=True, autospec=True)
    def test_reboot(self, mock_power_helper, mock__wait, mock_helper):
        """reboot() succeeds when the TV ends up powered on."""
        mock_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
        mock_power = mock_power_helper.return_value
        mock__wait.return_value = states.POWER_ON
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertIsNone(self.interface.reboot(task))
            mock_power.reboot.assert_called_once_with()

    @mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
                spec_set=True, autospec=True)
    @mock.patch('ironic.drivers.modules.ucs.power.ucs_power.UcsPower',
                spec_set=True, autospec=True)
    def test_reboot_fail(self, mock_power_helper,
                         mock_ucs_helper):
        """UcsSdk reboot errors must surface as UcsOperationError."""
        mock_ucs_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
        mock_power = mock_power_helper.return_value
        mock_power.reboot.side_effect = (
            ucs_error.UcsOperationError(operation='rebooting', error='failed'))
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertRaises(exception.UcsOperationError,
                              self.interface.reboot,
                              task
                              )
            mock_power.reboot.assert_called_once_with()

    @mock.patch('ironic.drivers.modules.ucs.helper.ucs_helper',
                spec_set=True, autospec=True)
    @mock.patch('ironic.drivers.modules.ucs.power._wait_for_state_change',
                spec_set=True, autospec=True)
    @mock.patch('ironic.drivers.modules.ucs.power.ucs_power.UcsPower',
                spec_set=True, autospec=True)
    def test_reboot__wait_state_change_fail(self, mock_power_helper,
                                            mock__wait,
                                            mock_ucs_helper):
        """A reboot whose wait loop reports ERROR raises PowerStateFailure."""
        mock_ucs_helper.generate_ucsm_handle.return_value = (True, mock.Mock())
        mock_power = mock_power_helper.return_value
        mock__wait.return_value = states.ERROR
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertRaises(exception.PowerStateFailure,
                              self.interface.reboot,
                              task)
            mock_power.reboot.assert_called_once_with()
|
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 9 14:58:39 2015
@author: jpk
This script produces a report of observations taken during the last 7 days
at SALT, prints it to the terminal and writes it to a file.
The script runs from today and queries the sdb for data going back 7 days.
"""
import os
import sys
import pandas as pd
import pandas.io.sql as psql
import MySQLdb
import matplotlib.pyplot as pl
import numpy as np
from datetime import datetime
import report_queries as rq
import getopt
def string_header(dr):
    '''
    Format the report header to be printed and written to file.

    dr -- presumably a one-row DataFrame holding the report date range
    (StartDate/EndDate) as produced by report_queries.date_range --
    TODO confirm against the caller.
    '''
    # Legacy pandas .ix indexer: take the first row and render it as
    # "<label>    <value>" lines, one per column.
    s = dr.ix[0].to_string().split('\n')
    txt = '''
    *************** SALT Weekly Observing Stats *****************
    A report for %s to
    %s
    ''' %(s[0], s[1])
    return txt
def string_weekly_total_time_breakdown(wttb):
    '''Format the weekly time-breakdown table as a printable string.'''
    # determine the percentages of time broken down into categories
    t = pd.Series(wttb.stack(), index = wttb.stack().index)
    t.index = t.index.get_level_values(1)
    per = pd.Series(np.zeros(len(t)), index = t.index)
    # Label-based Series slices: both endpoints are inclusive in pandas.
    per['Weather':'Science'] = t['Weather':'Science'] / t.Total * 100
    per['TimeLostToWeather': 'ScienceTime'] = per['Weather':'Science']
    # write out the string:
    txt = '''
    -------------------------------------------------------------
    Time Breakdown:
    ---------------
    Science time: {} ({:,.0f}%)
    Engineering time: {} ({:,.0f}%)
    Weather: {} ({:,.0f}%)
    Problems: {} ({:,.0f}%)
    --
    Total: {}
    '''.format(t.ScienceTime, per.Science,
               t.EngineeringTime, per.Engineering,
               t.TimeLostToWeather, per.Weather,
               t.TimeLostToProblems, per.Problems,
               t.NightLength)
    return txt
def string_weekly_priority_breakdown(wpb):
    '''Format the per-priority block counts with percentage shares.

    NOTE(review): mutates the caller's DataFrame by adding a 'per' column.
    '''
    # create a percentage column
    wpb['per'] = pd.Series(np.zeros(len(wpb)), index = wpb.index)
    # determine the percentage from the Tsec column which is in seconds
    wpb.per = (wpb.Tsec / wpb.Tsec.sum()) * 100
    txt = wpb.to_string(columns=['Priority', 'No. Blocks', 'per'],
                        index=False,
                        header=False,
                        formatters={'per':'({:,.0f}%)'.format,
                                    'Priority':' {:>5} '.format,
                                    'No. Blocks':' {0:,.0f} '.format})
    hdr = '''
    -------------------------------------------------------------
    Priority BreakDown:
    -------------------
    Priority No. Blocks
    '''
    ftr = '''
    --
    Total {0:,.0f}
    '''.format(wpb['No. Blocks'].sum())
    return hdr + txt + ftr
def string_weekly_subsystem_breakdown(wsb):
    '''Format the per-SALT-subsystem problem-time table.

    NOTE(review): mutates the caller's DataFrame by adding a 'per' column.
    '''
    # calculate the percentage of time breakdown
    # create a new percentage column
    wsb['per'] = pd.Series(np.zeros(len(wsb)), index = wsb.index)
    # determine the percentage from the Time column which is in seconds
    wsb.per = (wsb.Time / wsb.Time.sum()) * 100
    # create a string object to be printed and written to file
    txt = wsb.to_string(columns=['SaltSubsystem', 'TotalTime', 'per'],
                        index=False,
                        header=False,
                        formatters={'SaltSubsystem':' {:>11} '.format,
                                    'per':'({:,.0f}%)'.format,
                                    'TotalTime':' {} '.format })
    hdr = '''
    -------------------------------------------------------------
    Problems Time Breakdown
    ---------------------
    SALT Subsystem Total Time
    '''
    return hdr + txt
def print_to_screen(txt):
    '''
    Print the formatted report plus the closing footer to the terminal.

    NOTE: Python 2 print statement -- this module is not Python 3
    compatible as written.
    '''
    ftr = '''
    ****************** End of Weekly Report *********************
    '''
    print txt + ftr
    return
def write_to_file(dr, txt, dirname='./logs/'):
    '''
    Write the report text to a file named according to the date range.

    dr      -- one-row DataFrame with StartDate/EndDate datetime columns
    txt     -- the formatted report body
    dirname -- output directory (must already exist; no makedirs here)
    '''
    # File name pattern: weekly_report_YYYYMMDD-YYYYMMDD.txt
    filename = 'weekly_report_' + datetime.strftime(dr.StartDate[0], '%Y%m%d') + \
               '-' + datetime.strftime(dr.EndDate[0], '%Y%m%d') + '.txt'
    ftr = '''
    ****************** End of Weekly Report *********************
    '''
    with open(dirname+filename, 'w') as f:
        f.write(txt + ftr)
def commandLine(argv):
    '''
    Parse command line options when the module is run as a script.

    NOTE(review): dprint() and usage() are not defined in this module --
    presumably imported elsewhere or dead code; confirm before relying on
    this entry point.  The parsed values (verbose, filter, ...) are plain
    locals and are never returned or stored, so this function currently
    has no observable effect beyond possible sys.exit().
    '''
    # executes if module is run from the command line
    # Testing a datetime check
    # if type(arg) is not datetime.date:
    # raise TypeError('arg must be a datetime.date, not a %s' % type(arg))
    dprint("Reading command line options")
    # read command line options
    try:
        opts,args = getopt.getopt(sys.argv[1:],"vdct:f:i:r:o",
        ["sdate=","edate=","date=", "interval=","filter=","instrument=","radius=","ocs","help"])
    except getopt.GetoptError, inst:
        print inst
        print 'Use --help to get a list of options'
        sys.exit(2)
    # parse them to the relevant variables
    for opt, arg in opts:
        if opt in ('--help'):
            usage()
        elif opt in ('-v','--verbose'):
            verbose=True
        elif opt in ('-d','--debug'):
            verbose=True # implied
            debug=True
        elif opt in ('-f','--filter'):
            filter = arg
        elif opt in ('-i','--instrument'):
            ins = arg
        elif opt in ('-r','--radius'):
            radius = float(arg)
        elif opt in ('-t','--target-id'):
            target_id = arg
        elif opt in ('-c','--current'):
            use_current_pointing = True
        elif opt in ('-o','--ocs'):
            use_ocs = True
        else:
            print 'Unknown option: ' + opt
            usage()
if __name__=='__main__':
    # open mysql connection to the sdb
    # Credentials come from the environment; this raises KeyError when
    # SDBUSER/SDBPASS are not set.
    mysql_con = MySQLdb.connect(host='sdb.cape.saao.ac.za',
                                port=3306, user=os.environ['SDBUSER'],
                                passwd=os.environ['SDBPASS'], db='sdb')
    # Positional CLI args: YYYYMMDD observation date and look-back
    # interval (no validation; IndexError if either is missing).
    obsdate = sys.argv[1]
    date = '{}-{}-{}'.format(obsdate[0:4], obsdate[4:6], obsdate[6:8])
    interval = sys.argv[2]
    # use the connection to get the required data: _d
    dr_d = rq.date_range(mysql_con, date, interval=interval)
    wpb_d = rq.weekly_priority_breakdown(mysql_con, date, interval=interval)
    wtb_d = rq.weekly_time_breakdown(mysql_con, date, interval=interval)
    wttb_d = rq.weekly_total_time_breakdown(mysql_con, date, interval=interval)
    wsb_d = rq.weekly_subsystem_breakdown(mysql_con, date, interval=interval)
    # TESTING: save the dataframes
    # NOTE(review): DataFrame.save() is long-removed legacy pandas API;
    # these calls only work with very old pandas releases.
    dr_d.save('dr_d')
    wpb_d.save('wpd_d')
    wtb_d.save('wtb_d')
    wttb_d.save('wttd_d')
    wsb_d.save('wsb_d')
    # format the string needed to print and write to file: _t
    dr_t = string_header(dr_d)
    wpd_t = string_weekly_priority_breakdown(wpb_d)
    wttb_t = string_weekly_total_time_breakdown(wttb_d)
    wsb_t = string_weekly_subsystem_breakdown(wsb_d)
    # print the report to the terminal
    print_to_screen(dr_t + wpd_t + wttb_t + wsb_t)
    # write the report to file
    write_to_file(dr_d, dr_t + wpd_t + wttb_t + wsb_t)
    mysql_con.close()
|
|
"""Support for interface with an LG webOS Smart TV."""
import asyncio
from datetime import timedelta
from functools import wraps
import logging
from aiopylgtv import PyLGTVCmdException, PyLGTVPairException, WebOsClient
from websockets.exceptions import ConnectionClosed
from homeassistant import util
from homeassistant.components.media_player import DEVICE_CLASS_TV, MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_CHANNEL,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.components.webostv.const import (
ATTR_PAYLOAD,
ATTR_SOUND_OUTPUT,
CONF_ON_ACTION,
CONF_SOURCES,
DOMAIN,
LIVE_TV_APP_ID,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_CUSTOMIZE,
CONF_HOST,
CONF_NAME,
ENTITY_MATCH_ALL,
ENTITY_MATCH_NONE,
STATE_OFF,
STATE_ON,
)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.script import Script
_LOGGER = logging.getLogger(__name__)
# Base feature set every webOS TV entity reports.
SUPPORT_WEBOSTV = (
    SUPPORT_TURN_OFF
    | SUPPORT_NEXT_TRACK
    | SUPPORT_PAUSE
    | SUPPORT_PREVIOUS_TRACK
    | SUPPORT_SELECT_SOURCE
    | SUPPORT_PLAY_MEDIA
    | SUPPORT_PLAY
)
# Volume features added conditionally based on the active sound output.
SUPPORT_WEBOSTV_VOLUME = SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_STEP
# Throttle bounds for async_update polling.
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(seconds=1)
SCAN_INTERVAL = timedelta(seconds=10)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the LG webOS Smart TV platform."""
    # Entities are only created through discovery.
    if discovery_info is None:
        return

    host = discovery_info[CONF_HOST]
    client = hass.data[DOMAIN][host]["client"]
    turn_on_action = discovery_info.get(CONF_ON_ACTION)
    on_script = Script(hass, turn_on_action) if turn_on_action else None

    entity = LgWebOSMediaPlayerEntity(
        client,
        discovery_info[CONF_NAME],
        discovery_info[CONF_CUSTOMIZE],
        on_script,
    )
    async_add_entities([entity], update_before_add=False)
def cmd(func):
    """Catch command exceptions."""

    @wraps(func)
    async def wrapper(obj, *args, **kwargs):
        """Wrap all command methods."""
        try:
            await func(obj, *args, **kwargs)
        except (
            asyncio.TimeoutError,
            asyncio.CancelledError,
            PyLGTVCmdException,
        ) as exc:
            # Failures are expected while the TV is powered off, so log
            # them at INFO instead of ERROR in that case.
            level = logging.INFO if obj.state == STATE_OFF else logging.ERROR
            _LOGGER.log(
                level,
                "Error calling %s on entity %s: %r",
                func.__name__,
                obj.entity_id,
                exc,
            )

    return wrapper
class LgWebOSMediaPlayerEntity(MediaPlayerEntity):
    """Representation of a LG webOS Smart TV."""
    def __init__(self, client: WebOsClient, name: str, customize, on_script=None):
        """Initialize the webos device."""
        self._client = client
        self._name = name
        # The aiopylgtv pairing key doubles as a stable unique id.
        self._unique_id = client.client_key
        # customize holds the CONF_SOURCES allow-list (see update_sources).
        self._customize = customize
        # Optional Script run by async_turn_on (e.g. wake-on-lan).
        self._on_script = on_script
        # Assume that the TV is not paused
        self._paused = False
        # Currently-active source label and label -> app/input mapping.
        self._current_source = None
        self._source_list = {}
    async def async_added_to_hass(self):
        """Connect and subscribe to dispatcher signals and state updates."""
        async_dispatcher_connect(self.hass, DOMAIN, self.async_signal_handler)
        await self._client.register_state_update_callback(
            self.async_handle_state_update
        )
    async def async_will_remove_from_hass(self):
        """Call disconnect on removal."""
        self._client.unregister_state_update_callback(self.async_handle_state_update)
    async def async_signal_handler(self, data):
        """Handle domain-specific signal by calling appropriate method."""
        entity_ids = data[ATTR_ENTITY_ID]
        if entity_ids == ENTITY_MATCH_NONE:
            return
        if entity_ids == ENTITY_MATCH_ALL or self.entity_id in entity_ids:
            # Forward every payload key except the routing fields as
            # keyword arguments to the named method.
            params = {
                key: value
                for key, value in data.items()
                if key not in ["entity_id", "method"]
            }
            await getattr(self, data["method"])(**params)
    async def async_handle_state_update(self):
        """Update state from WebOsClient."""
        self.update_sources()
        self.async_write_ha_state()
    def update_sources(self):
        """Update list of sources from current source, apps, inputs and configured list."""
        self._source_list = {}
        conf_sources = self._customize[CONF_SOURCES]
        found_live_tv = False
        # The active app/input is always listed; others are filtered by
        # conf_sources (exact id or substring match) when configured.
        for app in self._client.apps.values():
            if app["id"] == LIVE_TV_APP_ID:
                found_live_tv = True
            if app["id"] == self._client.current_appId:
                self._current_source = app["title"]
                self._source_list[app["title"]] = app
            elif (
                not conf_sources
                or app["id"] in conf_sources
                or any(word in app["title"] for word in conf_sources)
                or any(word in app["id"] for word in conf_sources)
            ):
                self._source_list[app["title"]] = app
        for source in self._client.inputs.values():
            if source["appId"] == LIVE_TV_APP_ID:
                found_live_tv = True
            if source["appId"] == self._client.current_appId:
                self._current_source = source["label"]
                self._source_list[source["label"]] = source
            elif (
                not conf_sources
                or source["label"] in conf_sources
                or any(source["label"].find(word) != -1 for word in conf_sources)
            ):
                self._source_list[source["label"]] = source
        # special handling of live tv since this might not appear in the app or input lists in some cases
        if not found_live_tv:
            app = {"id": LIVE_TV_APP_ID, "title": "Live TV"}
            if LIVE_TV_APP_ID == self._client.current_appId:
                self._current_source = app["title"]
                self._source_list["Live TV"] = app
            elif (
                not conf_sources
                or app["id"] in conf_sources
                or any(word in app["title"] for word in conf_sources)
                or any(word in app["id"] for word in conf_sources)
            ):
                self._source_list["Live TV"] = app
    @util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
    async def async_update(self):
        """Connect."""
        if not self._client.is_connected():
            try:
                await self._client.connect()
            except (
                OSError,
                ConnectionClosed,
                ConnectionRefusedError,
                asyncio.TimeoutError,
                asyncio.CancelledError,
                PyLGTVPairException,
                PyLGTVCmdException,
            ):
                # Connection errors are swallowed; the next throttled
                # update simply retries the connection.
                pass
    @property
    def unique_id(self):
        """Return the unique id of the device."""
        return self._unique_id
    @property
    def name(self):
        """Return the name of the device."""
        return self._name
    @property
    def device_class(self):
        """Return the device class of the device."""
        return DEVICE_CLASS_TV
    @property
    def state(self):
        """Return the state of the device."""
        if self._client.is_on:
            return STATE_ON
        return STATE_OFF
    @property
    def is_volume_muted(self):
        """Boolean if volume is currently muted."""
        return self._client.muted
    @property
    def volume_level(self):
        """Volume level of the media player (0..1)."""
        if self._client.volume is not None:
            return self._client.volume / 100.0
        return None
    @property
    def source(self):
        """Return the current input source."""
        return self._current_source
    @property
    def source_list(self):
        """List of available input sources."""
        return sorted(self._source_list.keys())
    @property
    def media_content_type(self):
        """Content type of current playing media."""
        if self._client.current_appId == LIVE_TV_APP_ID:
            return MEDIA_TYPE_CHANNEL
        return None
    @property
    def media_title(self):
        """Title of current playing media."""
        if (self._client.current_appId == LIVE_TV_APP_ID) and (
            self._client.current_channel is not None
        ):
            return self._client.current_channel.get("channelName")
        return None
    @property
    def media_image_url(self):
        """Image url of current playing media."""
        if self._client.current_appId in self._client.apps:
            icon = self._client.apps[self._client.current_appId]["largeIcon"]
            # Some apps report a non-URL largeIcon; fall back to "icon".
            if not icon.startswith("http"):
                icon = self._client.apps[self._client.current_appId]["icon"]
            return icon
        return None
    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        supported = SUPPORT_WEBOSTV
        # "external_arc" output: step/mute only; any output other than
        # "lineout" additionally supports absolute volume.
        if self._client.sound_output == "external_arc":
            supported = supported | SUPPORT_WEBOSTV_VOLUME
        elif self._client.sound_output != "lineout":
            supported = supported | SUPPORT_WEBOSTV_VOLUME | SUPPORT_VOLUME_SET
        if self._on_script:
            supported = supported | SUPPORT_TURN_ON
        return supported
    @property
    def device_state_attributes(self):
        """Return device specific state attributes."""
        attributes = {}
        if self._client.sound_output is not None and self.state != STATE_OFF:
            attributes[ATTR_SOUND_OUTPUT] = self._client.sound_output
        return attributes
    @cmd
    async def async_turn_off(self):
        """Turn off media player."""
        await self._client.power_off()
    async def async_turn_on(self):
        """Turn on the media player."""
        if self._on_script:
            await self._on_script.async_run()
    @cmd
    async def async_volume_up(self):
        """Volume up the media player."""
        await self._client.volume_up()
    @cmd
    async def async_volume_down(self):
        """Volume down media player."""
        await self._client.volume_down()
    @cmd
    async def async_set_volume_level(self, volume):
        """Set volume level, range 0..1."""
        # HA uses 0..1; the TV expects an integer percentage.
        tv_volume = int(round(volume * 100))
        await self._client.set_volume(tv_volume)
    @cmd
    async def async_mute_volume(self, mute):
        """Send mute command."""
        await self._client.set_mute(mute)
    @cmd
    async def async_select_sound_output(self, sound_output):
        """Select the sound output."""
        await self._client.change_sound_output(sound_output)
    @cmd
    async def async_media_play_pause(self):
        """Simulate play pause media player."""
        if self._paused:
            await self.async_media_play()
        else:
            await self.async_media_pause()
    @cmd
    async def async_select_source(self, source):
        """Select input source."""
        source_dict = self._source_list.get(source)
        if source_dict is None:
            _LOGGER.warning("Source %s not found for %s", source, self.name)
            return
        # Apps carry a "title" key, physical inputs a "label" key.
        if source_dict.get("title"):
            await self._client.launch_app(source_dict["id"])
        elif source_dict.get("label"):
            await self._client.set_input(source_dict["id"])
    @cmd
    async def async_play_media(self, media_type, media_id, **kwargs):
        """Play a piece of media."""
        _LOGGER.debug("Call play media type <%s>, Id <%s>", media_type, media_id)
        if media_type == MEDIA_TYPE_CHANNEL:
            _LOGGER.debug("Searching channel...")
            # Exact channel-number/name matches take precedence over
            # substring name matches.
            partial_match_channel_id = None
            perfect_match_channel_id = None
            for channel in self._client.channels:
                if media_id == channel["channelNumber"]:
                    perfect_match_channel_id = channel["channelId"]
                    continue
                if media_id.lower() == channel["channelName"].lower():
                    perfect_match_channel_id = channel["channelId"]
                    continue
                if media_id.lower() in channel["channelName"].lower():
                    partial_match_channel_id = channel["channelId"]
            if perfect_match_channel_id is not None:
                _LOGGER.info(
                    "Switching to channel <%s> with perfect match",
                    perfect_match_channel_id,
                )
                await self._client.set_channel(perfect_match_channel_id)
            elif partial_match_channel_id is not None:
                _LOGGER.info(
                    "Switching to channel <%s> with partial match",
                    partial_match_channel_id,
                )
                await self._client.set_channel(partial_match_channel_id)
    @cmd
    async def async_media_play(self):
        """Send play command."""
        self._paused = False
        await self._client.play()
    @cmd
    async def async_media_pause(self):
        """Send media pause command to media player."""
        self._paused = True
        await self._client.pause()
    @cmd
    async def async_media_stop(self):
        """Send stop command to media player."""
        await self._client.stop()
    @cmd
    async def async_media_next_track(self):
        """Send next track command."""
        # On live TV next/previous map to channel up/down.
        current_input = self._client.get_input()
        if current_input == LIVE_TV_APP_ID:
            await self._client.channel_up()
        else:
            await self._client.fast_forward()
    @cmd
    async def async_media_previous_track(self):
        """Send the previous track command."""
        current_input = self._client.get_input()
        if current_input == LIVE_TV_APP_ID:
            await self._client.channel_down()
        else:
            await self._client.rewind()
    @cmd
    async def async_button(self, button):
        """Send a button press."""
        await self._client.button(button)
    @cmd
    async def async_command(self, command, **kwargs):
        """Send a command."""
        await self._client.request(command, payload=kwargs.get(ATTR_PAYLOAD))
|
|
## Automatically adapted for numpy.oldnumeric May 17, 2011 by -c
import re,string,os,sys
# Module-wide verbose-output switch (used e.g. by Topological.addRelation).
debug = False
class Topology(dict):
    '''Database modeling a topological map.'''
    # NOTE(review): class-level mutable attributes -- shared across all
    # instances until reset() rebinds some of them per instance.
    Line = {}
    Content = {}
    RouteInstructions = []
    TopoAttrib = []
    def __init__(self, **kwargs):
        dict.__init__(self, kwargs)
    def add(self,topo):
        # Register an entity under an attribute named after its class-level
        # 'name', keyed by its id; remember new names in TopoAttrib.
        #if __debug__: print 'Topology add',topo.name,topo
        if not self.__dict__.has_key(topo.name):
            self.__dict__[topo.name] = {}
            self.TopoAttrib.append(topo.name)
        self.__dict__[topo.name][topo.id] = topo
    def pprint(self):
        # Dump every attribute; dict-valued ones one entry per line.
        for k,v in self.__dict__.items():
            if type(v)==dict:
                print k,':\n\t', '\n\t'.join([str(k1)+'::'+str(v1) for k1,v1 in v.items()])
            else: print k,':',v
        for ri in self.RouteInstructions: print repr(ri)
    def reset(self):
        # Drop all registered entities but keep the Line/Content mappings.
        L=self.Line
        C=self.Content
        for k in self.TopoAttrib:
            del self.__dict__[k]
        self.TopoAttrib=[]
        self.RouteInstructions=[]
        self.Line=L
        self.Content=C
    def get_str_fn(self,name,value,fn='__str__'):
        # Strip one trailing digit so e.g. 'place2' resolves as 'place'.
        if name[-1] in '0123456789': name = name[:-1]
        if self.__dict__.has_key(name):
            # NOTE(review): eval() on a constructed expression; 'fn' is
            # caller-controlled, so this is safe for trusted input only.
            return eval('self.__dict__[name][value].'+fn+'()')
        else: return value
    def get_str(self,name,value):
        return self.get_str_fn(name,value,fn='__str__')
    def get_latex_str(self,name,value):
        return self.get_str_fn(name,value,fn='latex_str')
    def __getitem__(self,item): return self.__dict__[item]
# Module-level singleton topology instance.
TopoMap = Topology()
class Topological(object):
    '''Interface for topological entities.

    Subclasses are expected to define 'name', 'latex_content' and
    'latex_params' class attributes -- presumably set in subclasses
    below; confirm before extending.
    '''
    topology = None
    latex_markup=None
    latex_content=''
    id = ''
    # Pre-decorator (Python 2.3-era) staticmethod/classmethod idiom is
    # used throughout: define the function, then rebind it.
    def makeNamedPattern(name,regexp):
        # Build a named regex group: (?P<name>regexp).
        return ''.join(['(?P<',name,'>',regexp,')'])
    makeNamedPattern = staticmethod(makeNamedPattern)
    evidenceRegexp=r'\s*\\ev\{(?P<evidence>[0-9,\*]*)\}\{'
    commaSpaceRegexp=',[\s~\\;\\!]*'
    def markupContent(Markup,Content): return ''.join([Markup,'\{',Content,'\}'])
    markupContent = staticmethod(markupContent)
    def makeLinePatt(cls,Name,LineID,Patt=None):
        # Pattern capturing a whole "\ev{...}{Name(..." line under LineID.
        if not Patt: Patt = Name
        return re.compile('(?P<'+LineID+'>'+cls.evidenceRegexp+Patt+'\(.*)')
    makeLinePatt = classmethod(makeLinePatt)
    def makeItemPatt(cls,Name,Content):
        # Pattern for a single "\ev{...}{Name(Content)}" item.
        return re.compile(cls.evidenceRegexp+Name+'\('+Content+'\)\}')
    makeItemPatt = classmethod(makeItemPatt)
    def initLineParser(cls,topology):
        # Compile the per-class line/item patterns and bind the topology.
        cls.lineID = cls.name+'Line'
        cls.topology = topology
        #if __debug__: print cls.name,'initLineParser',cls.lineID
        cls.linePatt = cls.makeLinePatt(cls.name,cls.lineID,cls.name)
        #if __debug__: print 'makeLinePatt',cls.linePatt.pattern
        cls.itemPatt = cls.makeItemPatt(cls.name,cls.latex_content)
        #if __debug__: print 'makeItemPatt',cls.itemPatt.pattern
    initLineParser = classmethod(initLineParser)
    def parseLine(cls,results):
        # Instantiate one entity per ';'-separated item, then consume the
        # matched groups from the results dict.
        #if __debug__: print 'parseLine',results[cls.lineID]
        for item in results[cls.lineID].split(';'):
            match = cls.itemPatt.match(item)
            #if __debug__: print item,match
            if match: cls(cls.topology,match.groupdict())
        del results[cls.lineID]
        del results['evidence']
    parseLine = classmethod(parseLine)
    evidence={}
    def __init__(self,topology,d):
        #if __debug__: print self.name,'__init__',d
        self.topology = topology
        self.evidence={}
        if d.has_key('evidence'):
            self.evidence[tuple([d[k] for k in self.latex_params])]=d['evidence']
        topology.add(self)
    def __hash__(self): return self.__str__().__hash__()
    def latex_str(self): return ''.join([self.latex_markup, '{', str(self.id), '}'])
    def latex_repr(self):
        '''Print LaTeX formatted string representation.'''
        return ''.join(['\\ev{', ','.join([e for e in self.evidence]), '}',
                        '{', self.name,'(', self.latex_str(), ')};',])
    def addRelation(self,relation,key,value=None):
        '''Adds a relation key/value pair to the topological entity.'''
        if debug: print 'Topological (', str(self ),')', relation,key,value
        if not self.__dict__.has_key(relation): self.__dict__[relation] = {}
        self.__dict__[relation][key] = value
    def unify(self,other):
        '''Combine two topological objects.
        @type other: Topological
        @param other: self and other must share an isa relationship.
        '''
        pass
class TopologicalEntity(Topological):
    # Concrete entity (path/place/object/...): id is its own regexp group.
    local_topology = Topology()
    def __init__(self,topology,d):
        # The entity's single pattern parameter is its own name; the
        # captured value becomes the id.
        self.latex_params=[self.name]
        self.id = d[self.name]
        if debug: print 'TopologicalEntity (', str(self ),')'
        Topological.__init__(self,topology,d)
    def registerPattern(cls,topology):
        # Install this class's line parser and markup lookup in `topology`.
        cls.initLineParser(topology)
        topology.Line[cls.linePatt]=cls.parseLine
        topology.Content[cls.latex_markup]=cls.name
        #if __debug__: print cls.name,'registerPattern',cls.linePatt,cls.parseLine
    registerPattern = classmethod(registerPattern)
class GlobalDir:
    '''A global compass direction on the map grid.

    Directions are indices into `dirs`: 0='d'(own), 1='r'(ight),
    2='u'(p), 3='l'(eft).  All arithmetic wraps modulo numDirs.
    '''
    dirs = 'drul'
    numDirs = 4
    def __init__(self,dir=None):
        # Accept None (default 'd'), a direction letter, an int index,
        # or another GlobalDir (copy constructor).
        if dir is None:
            self.dir = 0
        elif type(dir) == str:
            # BUG FIX: was `dirs.find(dir)` -- `dirs` is a class attribute,
            # not a global, so this raised NameError.
            self.dir = self.dirs.find(dir)
        elif isinstance(dir, int):
            self.dir = dir
        elif isinstance(dir, GlobalDir):
            # BUG FIX: was `type(dir) == __name__`, which compares a type
            # object to the module-name string (always False).
            self.dir = dir.dir
        else: raise TypeError('Unknown type '+str(type(dir)))
        self.normalize()
    def normalize(self):
        # Wrap into [0, numDirs); returns self so calls can chain.
        self.dir %= self.numDirs
        return self
    def __str__(self):
        # BUG FIX: was the undefined global `dirs`.
        return self.dirs[self.dir]
    def __repr__(self):
        return self.dirs[self.dir]
    def __eq__(self,other):
        if other is None: return False
        return self.dir == other.dir
    def __add__(self,other):
        # int -> rotate clockwise; GlobalDir -> add indices; str -> the
        # direction letter with `other` appended.
        if isinstance(other, int):
            return GlobalDir(self.dir + other)
        elif isinstance(other, GlobalDir):
            return GlobalDir(self.dir + other.dir)
        elif type(other) == str:
            return self.dirs[self.dir] + other
        else: raise TypeError('Unknown type '+str(type(other)))
    def __sub__(self,other):
        if isinstance(other, int):
            return GlobalDir(self.dir - other)
        elif isinstance(other, GlobalDir):
            return GlobalDir(self.dir - other.dir)
        else: raise TypeError('Unknown type '+str(type(other)))
    def __neg__(self):
        # Opposite direction; // keeps the index an int under Python 3.
        return GlobalDir(self.dir + self.numDirs//2)
    def __iadd__(self,other):
        if isinstance(other, int):
            self.dir += other
        elif isinstance(other, GlobalDir):
            self.dir += other.dir
        else: raise TypeError('Unknown type '+str(type(other)))
        return self.normalize()
    def __isub__(self,other):
        if isinstance(other, int):
            self.dir -= other
        elif isinstance(other, GlobalDir):
            self.dir -= other.dir
        else: raise TypeError('Unknown type '+str(type(other)))
        return self.normalize()
    def add(self,xy,d):
        # Step distance `d` from grid point `xy` in this direction
        # (screen coordinates: 'd' decreases y, 'u' increases y).
        x, y = xy  # was Python-2-only tuple-parameter unpacking
        if self.dir==0: return (x,y-d) #d
        elif self.dir==1: return (x+d,y) #r
        elif self.dir==2: return (x,y+d) #u
        elif self.dir==3: return (x-d,y) #l
class Path(TopologicalEntity):
    '''Representation of an SSH topological path.'''
    latex_markup=r'\\sshpath'
    name='path'
    latex_content=Topological.markupContent(latex_markup,Topological.makeNamedPattern(name,r'[^\}]+'))
    def __init__(self,topology,d):
        # A path owns two directed views of itself, keyed '+'/'-'.
        self.dir = {}
        self.dir['+'] = PathDir(topology,d,'+')
        topology.add(self.dir['+'])
        self.dir['-'] = PathDir(topology,d,'-')
        topology.add(self.dir['-'])
        TopologicalEntity.__init__(self,topology,d)
        self.terminates = {}  # dir ('+'/'-') -> terminating place id
    def fmt(cls,id):
        # xypic node name for a path id (quoted "Pa<id>").
        if type(id) == Path: return '"'+str(id)+'"'
        else: return '"Pa'+str(id)+'"'
    fmt = classmethod(fmt)
    def __str__(self): return 'Pa'+self.id
    def __repr__(self): return 'Path(\''+self.id+'\')'
    class partialOrder:
        # Comparator built from a set of (before, after) pairs.
        def __init__(self,partialOrder): self.partialOrder = partialOrder
        def cmp(self,x,y):
            if (x,y) in self.partialOrder: return -1
            elif (y,x) in self.partialOrder: return 1
            else: return 0 # Arbitrary
    def getTotalPlaceOrder(self):
        # Sort this path's places by the '+'-direction order relation
        # (Python 2 cmp-style sort), returning bare place ids.
        totalOrder = [self.topology.place[k].latex_str()[1:] for k in self.on]
        porder = self.partialOrder(self.dir['+'].order)
        totalOrder.sort(porder.cmp)
        prefixLen = len(Place.latex_markup)
        return [p[prefixLen:-1] for p in totalOrder]
    def writeMap(self,pathdir,drawn,placesOrder,arbitraryTurn,mapFile):
        # Emit xypic markup drawing this path through `placesOrder`.
        # output ghost ends if not terminates
        if '-' not in self.terminates:
            termPlace = Place.fmt(placesOrder[0])
            mapFile.write(termPlace+' ['+str(-pathdir)+'] *{} ='+PathDir.fmt(self.id,'-')+'\n')
            mapFile.write(PathDir.fmt(self.id,'-')+':@{/--+} '+termPlace+'\n')
        if '+' not in self.terminates:
            termPlace = Place.fmt(placesOrder[-1])
            mapFile.write(termPlace+' ['+str(pathdir)+'] *{} ='+PathDir.fmt(self.id,'+')+'\n')
            mapFile.write(termPlace+':@{/--+} '+PathDir.fmt(self.id,'+')+'\n')
        if arbitraryTurn: #xyrefer 6.2
            # Draw a fuzzy '?' turn toward the reference path.
            turnFrom = drawn.get('Pa'+arbitraryTurn,1)
            arbTurnID = PathDir.fmt(arbitraryTurn,'at')
            mapFile.write(Place.fmt(placesOrder[0])+' ['+str(turnFrom)+str(turnFrom-1)+'(0.33)]'+
                          ' *{} ='+arbTurnID+'\n')
            mapFile.write(Place.fmt(placesOrder[0])+' ['+str(turnFrom)+str(turnFrom+1)+'(0.33)]'+
                          ' :@/'+str(turnFrom)+'/@{ *{?} } '+arbTurnID+'\n')
        #Sanity check ends
        mapFile.write(Place.fmt(placesOrder[0])+':@^{/:+} ')
        for place in placesOrder[1:-1]: mapFile.write('\''+Place.fmt(place)+' ')
        mapFile.write(Place.fmt(placesOrder[-1])+' ')
        mapFile.write('^{'+self.latex_str()[1:]+'}\n')
        for place1,place2 in zip(placesOrder[:-1],placesOrder[1:]):
            # Label each segment with its distance annotation (if known).
            mapFile.write(Place.fmt(place1)+' :@^{} '+Place.fmt(place2)+' ')
            dist = ('?','Moves')
            if self.__dict__.has_key('pathDistance'):
                if (place1,place2) in self.pathDistance: dist = self.pathDistance[(place1,place2)]
                elif (place2,place1) in self.pathDistance: dist = self.pathDistance[(place2,place1)]
            dist = dist[0] #' '.join(dist)
            mapFile.write(' ^(.33){-'+dist+'-}\n')
        if 'onObject' in self.__dict__:
            mapFile.write(Place.fmt(placesOrder[0])+' :@^{} '+Place.fmt(placesOrder[-1])+' ')
            mapFile.write('^(0.75){\\textsf{\\txt{'+',\\\\ '.join(self.onObject.keys())+'}}}\n')
        annotations = []
        for d in ['+','-']:
            for annote in ['appear','pathType']:
                if annote in self.dir[d].__dict__:
                    for place,annotation in self.dir[d].__dict__[annote].items():
                        annotations.append(annotation)
        if annotations:
            # NOTE(review): place1/place2 here are the last pair from the
            # segment loop above -- presumably intentional; confirm.
            mapFile.write(Place.fmt(place1)+' :@^{} '+Place.fmt(place2)+' _{\\texttt{\\txt{'+',\\\\'.join(annotations)+'}}}\n')
        drawn[str(self)] = pathdir
Path.registerPattern(TopoMap)
class Place(TopologicalEntity):
    '''Representation of an SSH topological place.'''
    latex_markup=r'\\sshplace'
    name='place'
    latex_content=Topological.markupContent(latex_markup,Topological.makeNamedPattern(name,r'[^\}]+'))
    def fmt(cls,id):
        # xypic node name for a place id (quoted "Pl<id>").
        if type(id) == Place: return '"'+str(id)+'"'
        else: return '"Pl'+str(id)+'"'
    fmt = classmethod(fmt)
    def __str__(self): return 'Pl'+self.id
    def __repr__(self): return 'Place(\''+self.id+'\')'
    def writeMap(self,drawn,currentLoc,dist=1,pathDir=None,context=''):
        # Return xypic markup placing this node `dist` steps from
        # `currentLoc` in direction `pathDir`; records position in `drawn`.
        if not dist: dist = 3 # Arbitrary placement leaves room for visible gap.
        if pathDir == None: pathDir = GlobalDir()
        drawn[str(self)] = pathDir.add(currentLoc,dist)
        if dist == 0: disp = ''  # NOTE(review): `disp` is never read
        if context: context = Place.fmt(context)+' '
        return ''.join([context,'[',str(pathDir)*dist,']',' '*(10-dist),
                        '*{',self.latex_str()[1:],'}*++[o]{}*+\\frm{o} =',self.fmt(self.id),'\n'])
    def layout(self,drawn,currentLoc,dist=2,pathdir=None,mapFile=sys.stdout,context=''):
        # Draw this node, plus an annotation box for any at() objects.
        mapFile.write(self.writeMap(drawn,currentLoc,dist,pathdir,context))
        if self.__dict__.has_key('at'):
            dir = 'rd' #[,'ru','ld','lu']
            objects = ','.join(self.at.keys())
            mapFile.write(self.fmt(self.id)+' [] !{'+self.fmt(self.id)+'!/'+dir+' 24pt/} *++={\\txt{'+objects+'}}\n')
    def layoutPaths(self,drawn,currentLoc,pathdir=None,mapFile=sys.stdout,context=''):
        # Recursively lay out every path through this place and all places
        # on those paths; branch directions come from sideof relations.
        drawn[self.id+':Paths'] = True
        paths = self.on.keys()
        paths.sort()
        refpath = paths[0]
        for path in paths[1:] or paths:
            if len(paths)>1 and path == refpath: continue
            dir=drawn.get('Pa'+refpath,pathdir)
            pathObj = self.topology.path[path]
            side = None
            if refpath != path:
                if self.__dict__.has_key('sideof'): side = self.sideof.get((path+'+',refpath+'+'),None)
                if side == 'Left': dir -= 1
                elif side == 'Right': dir += 1
                else: dir -= 1# Arbitary left ### OR fork map
            pathOrder = pathObj.getTotalPlaceOrder()
            print 'layoutPaths',self,path,'+',refpath,'+',side,str(dir),str(drawn.get('Pa'+refpath,pathdir)),pathOrder
            for place in pathOrder:
                if 'Pl'+place not in drawn:
                    # Use the recorded distance annotation for spacing.
                    if 'pathDistance' in pathObj.__dict__:
                        if (self.id,place) in pathObj.pathDistance:
                            distCount,distUnit = pathObj.pathDistance[(self.id,place)]
                        elif (place,self.id) in pathObj.pathDistance:
                            distCount,distUnit = pathObj.pathDistance[(place,self.id)]
                        else: distCount,distUnit = (2,'Move')
                        distCount = int(distCount)
                    else: distCount = None
                    self.topology.place[place].layout(drawn,currentLoc,distCount,dir,mapFile,self)
            if str(pathObj) not in drawn:
                if refpath != path and not side:
                    print 'Arbitrary. Draw fuzzy turn for', pathObj, 'from', 'Pa'+refpath
                    arbitraryTurn=refpath
                else: arbitraryTurn=None
                pathObj.writeMap(dir,drawn,pathOrder,arbitraryTurn,mapFile)
            for place in pathOrder:
                if place+':Paths' not in drawn:
                    self.topology.place[place].layoutPaths(drawn,currentLoc,dir,mapFile,self)
            refpath = path
Place.registerPattern(TopoMap)
class Place2(Place):
    '''Variant of Place whose regexp group is named 'place2', so a single
    pattern can capture two distinct places.'''
    name = 'place2'
    # Rewrites every named group 'xxx>' in the parent pattern to 'xxx2>'
    # (literal replacement; equivalent to re.sub with the literal '>').
    latex_content = Place.latex_content.replace('>', '2>')
class PathDir(TopologicalEntity):
    '''Representation of an SSH topological path with direction.'''
    latex_markup=r'\\sshpathdir'
    name='pathdir'
    latex_content=Topological.markupContent(
        latex_markup,
        Topological.makeNamedPattern(name,
            Topological.makeNamedPattern('path',r'[^\}]+')+'\}\{'+
            Topological.makeNamedPattern('dir','(\+|\-)')) )
    latex_params=['path']
    id_values=['path','dir']
    def __init__(self,topology,d,dir=None):
        # Split a combined 'pathdir' capture ('<path>}{<dir>') into its
        # 'path' and 'dir' keys before initializing.
        if d.has_key('pathdir'):
            # NOTE(review): this branch sets d['dir'] but never self.dir,
            # yet self.id below reads self.dir -- confirm callers always
            # reach one of the other branches or set self.dir elsewhere.
            if '}{' in d['pathdir']:
                d['pathdir'] = d['pathdir'][:-3]+d['pathdir'][-1]
            d['path'] = d['pathdir'][:-1]
            d['dir']= d['pathdir'][-1]
        elif dir: self.dir=dir
        elif d.has_key('dir'): self.dir = d['dir']
        else: self.dir = '+'
        self.path=d['path']
        Topological.__init__(self,topology,d)
        self.id=self.path+self.dir
    def fmt(cls,id,dir): return '"Pa'+str(id)+dir+'"'
    fmt = classmethod(fmt)
    def __str__(self): return 'Pa'+self.id
    def __repr__(self): return 'PathDir(\''+self.path+','+self.dir+'\')'
    def latex_str(self): return ''.join([self.latex_markup, '{', str(self.path), '}{',str(self.dir),'}'])
    def extractPathDir(pathdirstr): return re.split('[\{\}]+',pathdirstr)
    extractPathDir=staticmethod(extractPathDir)
PathDir.registerPattern(TopoMap)
class PathDir2(PathDir):
    '''Variant of PathDir whose regexp groups are suffixed '2', letting a
    single pattern capture two directed paths.'''
    name = 'pathdir2'
    # Literal replacement of group-name terminators; same result as the
    # parent's re.sub('>', '2>', ...) idiom.
    latex_content = PathDir.latex_content.replace('>', '2>')
class PathFragment(TopologicalEntity):
    '''Representation of an SSH topological path fragment.'''
    latex_markup=r'\\sshpathfragment'
    name='pathfragment'
    latex_content=Topological.markupContent(latex_markup,PathDir.latex_content+','+Place.latex_content)
    # BUG FIX: str.join takes a single iterable; the originals passed the
    # parts as separate positional arguments (a TypeError at call time),
    # and __str__ referenced an undefined global `place`.
    # NOTE(review): assumes self.path/self.dir/self.place are populated
    # from the match dict -- confirm against TopologicalEntity.__init__.
    def __str__(self): return ''.join(['Pa',self.path,self.dir,'@','Pl'+self.place])
    def __repr__(self): return ''.join(['Path(\'',self.path,self.dir,',',self.place,'\')'])
PathFragment.registerPattern(TopoMap)
class Object(TopologicalEntity):
    '''A named SSH object that can lie on a path or at a place; captured
    as a bare token with no LaTeX markup of its own.'''
    latex_markup = None
    name = 'object'
    latex_content = Topological.makeNamedPattern(name, '[^\)\{\}]+')
    def __str__(self):
        return 'Obj_' + self.id
    def __repr__(self):
        return "Object('" + self.id + "')"
Object.registerPattern(TopoMap)
class Thing:
    '''Regular expression covering things that can be on paths.'''
    name = 'thing'
    Things = [Object, Place]
    # One named group alternating over every participating entity pattern.
    latex_content = Topological.makeNamedPattern(
        name, '|'.join(t.latex_content for t in Things))
class Thing2(Thing):
    # Same pattern as Thing with group names suffixed '2' (second operand).
    name='thing2'
    latex_content=re.sub('>','2>',Thing.latex_content)
class Appear:
    # Captures a free-form appearance description.
    name = 'appearance'
    latex_content=Topological.makeNamedPattern(name,r'[^\}]+')
class PathType:
    # Captures a free-form path-type description.
    name = 'type'
    latex_content=Topological.makeNamedPattern(name,r'[^\}]+')
class LocalTopology:
    # Captures a local-topology descriptor at a place.
    name = 'localTopology'
    latex_content=Topological.makeNamedPattern(name,r'[^\}]+')
class PathDistVal:
    # Captures a distance written as '<count>,<unit>'.
    name = 'pathDistVal'
    latex_content=Topological.makeNamedPattern(name,
        Topological.makeNamedPattern('distCount',r'[^\},]+')+','+\
        Topological.makeNamedPattern('distUnit',r'[^\},]+'))
class TopologicalRelation(Topological):
    '''Abstract class for topological relations.'''
    def __init__(self,topology,d):
        # Split combined 'pathdir'/'pathdir2' captures into path+dir keys.
        if d.has_key('pathdir'):
            if '}{' in d['pathdir']:
                d['pathdir'] = d['pathdir'][:-3]+d['pathdir'][-1]
            d['path'] = d['pathdir'][:-1]
            d['dir']= d['pathdir'][-1]
        if d.has_key('pathdir2'):
            if '}{' in d['pathdir2']:
                d['pathdir2'] = d['pathdir2'][:-3]+d['pathdir2'][-1]
            d['path2'] = d['pathdir2'][:-1]
            d['dir2']= d['pathdir2'][-1]
        Topological.__init__(self,topology,d)
        # The relation's id is the tuple of its rendered parameter values.
        self.id = tuple([topology.get_str(k,d[k]) for k in self.latex_params])
        self.latex_markup=None
        # Expose every captured group as an attribute (self.path, ...).
        for k,v in d.items(): self.__dict__[k] = v
        self.assertRelation()
    def __str__(self): return self.name+str(self.id)
    def __repr__(self): return self.name+'('+str(self.__dict__)+')'
    def registerPattern(cls,topology):
        # Build the combined content pattern from the parameter classes.
        cls.latex_content=cls.commaSpaceRegexp.join([p.latex_content for p in cls.params])
        cls.latex_params=[]
        for p in cls.params:
            if p.__dict__.has_key('id_values'): cls.latex_params+=p.id_values
            else: cls.latex_params.append(p.name)
        cls.initLineParser(topology)
        topology.Line[cls.linePatt] = cls.parseLine
        topology.Content[cls.latex_markup]=cls.name
    registerPattern = classmethod(registerPattern)
    def assertRelation(self): pass  # subclasses install the relation's facts
    def latex_str(self): return ',~'.join([self.topology.get_latex_str(p.name,self.__dict__[p.name]) for p in self.params])
class OnRelation(TopologicalRelation):
    '''Asserts a place is _on_ a path.'''
    name = 'on'
    params=[Path,Thing]
    def assertRelation(self):
        # Cross-link place<->path, or record an object lying on the path.
        if self.__dict__.has_key('place') and self.place:
            self.topology.place[self.place].addRelation(self.name,self.path,self.evidence)
            self.topology.path[self.path].addRelation(self.name,self.place,self.evidence)
        elif self.__dict__.has_key('object') and self.object:
            #self.topology.object[self.object].addRelation(self.name,self.path,self.evidence)
            self.topology.path[self.path].addRelation(self.name+'Object',self.object,self.evidence)
OnRelation.registerPattern(TopoMap)
class AppearRelation(TopologicalRelation):
    '''Asserts the appearance of a path segment.'''
    name = 'appear'
    params=[PathDir,Place,Appear]
    def assertRelation(self):
        # Record the appearance on both the place and the directed path.
        self.topology.place[self.place].addRelation(self.name,self.path+self.dir,self.appearance)
        self.topology.pathdir[self.path+self.dir].addRelation(self.name,self.place,self.appearance)
AppearRelation.registerPattern(TopoMap)
class OrderRelation(TopologicalRelation):
    # order(pathdir, thing, thing2): thing precedes thing2 along pathdir.
    name = 'order'
    params=[PathDir,Thing,Thing2]
    def assertRelation(self):
        self.topology.pathdir[self.path+self.dir].addRelation(self.name,(self.thing,self.thing2))
OrderRelation.registerPattern(TopoMap)
class SideOfRelation(TopologicalRelation):
    # tothe<Left|Right>Of(place, pathdir, pathdir2): relative path sides.
    name = 'sideof'
    params=[Place,PathDir,PathDir2]
    def assertRelation(self):
        # Record the side, its mirror image, and a directional alias.
        self.topology.place[self.place].addRelation(self.name,(self.path+self.dir,self.path2+self.dir2),self.sideof)
        if self.sideof == 'Left': other = 'Right'
        elif self.sideof == 'Right': other = 'Left'
        else: other = 'UNK'
        self.topology.place[self.place].addRelation(self.name,(self.path2+self.dir2,self.path+self.dir),other)
        self.name='tothe'+self.sideof+'Of'
        self.topology.place[self.place].addRelation(self.name,(self.path+self.dir,self.path2+self.dir2))
    def registerPattern(cls,topology):
        # Overrides the default: the markup is 'totheLeftOf'/'totheRightOf'
        # with the side captured into the 'sideof' group.
        cls.latex_content=cls.commaSpaceRegexp.join([p.latex_content for p in cls.params])
        cls.latex_params=[]
        for p in cls.params:
            if p.__dict__.has_key('id_values'): cls.latex_params+=p.id_values
            else: cls.latex_params.append(p.name)
        cls.initLineParser(topology)
        cls.lineID = cls.name+'Line'
        cls.topology = topology
        cls.linePatt = cls.makeLinePatt(cls.name,cls.lineID,'tothe(?P<sideof>(Right|Left))Of')
        #if __debug__: print 'makeLinePatt',cls.linePatt.pattern
        cls.itemPatt = cls.makeItemPatt('tothe(?P<sideof>(Right|Left))Of',cls.latex_content)
        topology.Line[cls.linePatt] = cls.parseLine
        topology.Content[cls.latex_markup]=cls.name
    registerPattern = classmethod(registerPattern)
SideOfRelation.registerPattern(TopoMap)
class PathTypeRelation(TopologicalRelation):
    # pathType(pathdir, place, type): annotate a path segment's type.
    name='pathType'
    params=[PathDir,Place,PathType]
    def assertRelation(self):
        self.topology.pathdir[self.path+self.dir].addRelation(self.name,self.place,self.type)
PathTypeRelation.registerPattern(TopoMap)
class TerminatesRelation(TopologicalRelation):
    # terminates(pathdir, place): the directed path ends at the place.
    name='terminates'
    params=[PathDir,Place]
    def assertRelation(self):
        self.topology.pathdir[self.path+self.dir].addRelation(self.name,self.place)
        self.topology.place[self.place].addRelation(self.name,self.path+self.dir)
        self.topology.path[self.path].terminates[self.dir] = self.place
TerminatesRelation.registerPattern(TopoMap)
class AtRelation(TopologicalRelation):
    # at(place, object): the object sits at the place.
    name='at'
    params=[Place,Object]
    def assertRelation(self):
        self.topology.place[self.place].addRelation(self.name,self.object)
AtRelation.registerPattern(TopoMap)
class LocalTopologyRelation(TopologicalRelation):
    # localTopology(place, descriptor): junction shape at the place.
    name='localTopology'
    params=[Place,LocalTopology]
    def assertRelation(self):
        self.topology.place[self.place].addRelation(self.name,self.localTopology)
LocalTopologyRelation.registerPattern(TopoMap)
class PathDistanceRelation(TopologicalRelation):
    # pathDistance(path, place, place2, count,unit): segment length.
    name='pathDistance'
    params=[Path,Place,Place2,PathDistVal]
    def assertRelation(self):
        self.topology.path[self.path].addRelation(self.name,(self.place,self.place2),(self.distCount,self.distUnit))
PathDistanceRelation.registerPattern(TopoMap)
class Instruction(Topological):
    # One parsed route instruction: \instruct{action}{islots}.
    class ISlot(Topological):
        # A single instruction slot: \islot{name}{value}.
        latex_markup=r'\\islot'
        name='islot'
        latex_content=Topological.markupContent(latex_markup,Topological.makeNamedPattern(name,r'[-\w]+'))+\
            Topological.markupContent('',Topological.makeNamedPattern('islotVal','[^, ]+'))
        pattern=re.compile(latex_content)
        # Slot values are either '\markup{v1}[{v2}]' or a plain token.
        islotValPatt=re.compile('('+\
            Topological.makeNamedPattern('islotValName','[^\{]+')+'\{'+\
            Topological.makeNamedPattern('islotVal1','[^\}]+')+'\}'+\
            '(\{'+Topological.makeNamedPattern('islotVal2','[^\}]+')+'\})?'+\
            ')|'+Topological.makeNamedPattern('islotPlain','[^\\\{\}]+'))
    name='instruction'
    latex_markup=r'\\instruct'
    params=[ISlot]
    param_name='islots'
    latex_params=[param_name]
    itemPatt = Topological.makeItemPatt('instruction',
        Topological.markupContent(latex_markup, Topological.makeNamedPattern(name,'[-\w]+'))+
        Topological.markupContent('',Topological.makeNamedPattern(param_name,'.*')))
    linePatt=re.compile(r'\\item'+Topological.makeLinePatt(name,name+'Line',name).pattern)
    def __init__(self,topology,d):
        Topological.__init__(self,topology,d)
        self.action = d['instruction']
        # Instructions are numbered in parse order, starting at 1.
        self.id = str(len(topology.RouteInstructions)+1)
        for islot in d['islots'].split(', '):
            m = self.ISlot.pattern.match(islot)
            #if __debug__: print 'Instruction __init__:', islot,m
            if m:
                #if __debug__: print 'Instruction __init__ match dict:', islot,m.groupdict()
                d = m.groupdict()
                islotmatch=self.ISlot.islotValPatt.match(d['islotVal'])
                #if __debug__: print 'Instruction islotmatch:',islotmatch
                #if __debug__ and islotmatch: print 'Instruction islotmatch dict:',islotmatch.groupdict()
                if islotmatch and islotmatch.groupdict()['islotValName']:
                    # Marked-up value: resolve it to the entity it names.
                    isv = islotmatch.groupdict()
                    slot = topology.Content['\\'+str(isv['islotValName'])],
                    #if __debug__: print 'Instruction islotVal slot:',slot[0],isv
                    #if __debug__: topology.pprint()
                    if isv['islotVal2']: islotVal=isv['islotVal1']+isv['islotVal2']
                    else: islotVal=isv['islotVal1']
                    self.addRelation(d['islot'], topology.__dict__[slot[0]][islotVal])
                else: self.addRelation(d['islot'], d['islotVal'])
        topology.RouteInstructions.append(self)
        #if __debug__: print repr(self)
    def __str__(self): return 'RI'+self.id
    def __repr__(self): return 'RI(\''+self.id+','+str(self.__dict__)+'\')'
    def registerPattern(cls,topology):
        # Instructions keep their hand-built patterns; just register them.
        topology.Line[cls.linePatt] = cls.parseLine
        cls.lineID = cls.name+'Line'
        cls.topology = topology
        topology.Content[cls.latex_markup]=cls.name
        #if __debug__: print 'Instruction: line pattern:', cls.linePatt.pattern
        #if __debug__: print 'Instruction: item pattern:', cls.itemPatt.pattern
    registerPattern = classmethod(registerPattern)
Instruction.registerPattern(TopoMap)
# Sink for structural LaTeX lines the parser should skip (prints a dot).
def dontCareLine(results): print '.',
#TopoMap.Line[re.compile('.*')]=dontCareLine
TopoMap.Line[re.compile('^.topolist.*$')] = dontCareLine
TopoMap.Line[re.compile('^.raggedright$')] = dontCareLine
TopoMap.Line[re.compile('^\s*.(begin|end)\{enumerate\}\s*$')] = dontCareLine
TopoMap.Line[re.compile('^(\}\{?|\s*)(%.*)?$')] = dontCareLine #End of one block
TopoMap.Line[re.compile('^\s*..\s*$')] = dontCareLine #Newline
TopoMap.Line[re.compile('^\s*$')] = dontCareLine #Blank line
TopoMap.Line[re.compile('^.emph\{.*$')] = dontCareLine #Comment
def getRouteSequence(riList):
    # Extract the ordered list of xypic place names visited by the route
    # instructions, with consecutive duplicates collapsed.
    routePlaces = []
    for ri in riList:
        if ri.action == 'turn':
            place = ri.at.keys()[0]
        elif ri.action == 'travel' or ri.action == 'find':
            place = ri.__dict__['from'].keys()[0]  # 'from' is a keyword, hence __dict__
        elif ri.action == 'declare-goal': pass
        # NOTE(review): for 'declare-goal' this reuses `place` from the
        # previous instruction (unbound if it is first) -- confirm intended.
        routePlaces.append(Place.fmt(place.id)+' ')
        if ri.action == 'travel' or ri.action == 'find':
            place = ri.to.keys()[0]
            routePlaces.append(place.fmt(place.id)+' ')
    uniqPlaces = [routePlaces[0]]
    for i in range(len(routePlaces)-1):
        if routePlaces[i] != routePlaces[i+1]:
            uniqPlaces.append(routePlaces[i+1])
    return uniqPlaces
def printGraphicalMap(TopoMap,filename,pathdir=None):
    # Render the whole topology as an xy-pic \xygraph into `filename`.
    pathdir = GlobalDir(pathdir)
    # NOTE(review): `file` below is the module global set by the __main__
    # loop (shadowing the builtin); this print fails if called standalone.
    print '='*8,file,'Graphical Map','='*8
    drawn = {}
    mapFile = open(filename,'w')
    mapFile.write('\\[\\xy \\xygraph{\n')
    # Draw connected places
    TopoMap.place.items()[0][1].layout(drawn,(0,0),dist=0,pathdir=pathdir,mapFile=mapFile)
    TopoMap.place.items()[0][1].layoutPaths(drawn,(0,0),pathdir=pathdir,mapFile=mapFile)
    # Look for and draw any unconnected places
    drawnPlaces = [k for k in drawn if k.startswith('Pl')]
    drawnPlaces.sort()
    for place in TopoMap.place.values():
        if str(place) not in drawnPlaces: #Arbitrary 3 dist, Arbitrary direction
            place.layout(drawn,(3,3),3,pathdir,mapFile,drawnPlaces[-1][2:])
            place.layoutPaths(drawn,(3,3),pathdir,mapFile,drawnPlaces[-1][2:])
            drawnPlaces = [k for k in drawn if k.startswith('Pl')]
            drawnPlaces.sort()
    # Mark route on map
    routePlaces = getRouteSequence(TopoMap.RouteInstructions)
    if routePlaces:
        mapFile.write(routePlaces[0]+' :@{(~)} ')
        for rp in routePlaces[1:-1]:
            mapFile.write('\''+rp)
        mapFile.write(routePlaces[-1])#+' _{route}\n')
    mapFile.write('} \\endxy \\]\n')
    mapFile.close()
if __name__ == '__main__':
    from Parser import parseFile
    results={}
    # Parse each direction-giver's LaTeX transcript and emit its map file.
    for Giver in ['EDA','EMWC','KLS','KXP','TJS','WLH']:
        suffix=Giver+'_Grid0_4_5_Dirs_1.txt.tex'
        file = 'Topo_'+suffix
        print '\n',file
        # initialize TopoMap fn
        TopoMap.reset()
        parseFile(TopoMap.Line, os.path.join('Directions','CorrFullTrees',file), results)
        print; TopoMap.pprint(); print
        printGraphicalMap(TopoMap,os.path.join('Directions','CorrFullTrees','TopoMap_'+suffix),'l')
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations that generate constants.
See the @{$python/constant_op$constants guide}.
@@zeros
@@zeros_like
@@ones
@@ones_like
@@fill
@@constant
@@linspace
@@range
@@random_normal
@@truncated_normal
@@random_uniform
@@random_shuffle
@@random_crop
@@multinomial
@@random_gamma
@@random_poisson
@@set_random_seed
"""
# Must be separate from array_ops to avoid a cyclic dependency.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
def _eager_reshape(tensor, shape, ctx):
  """Eager-only version of Reshape op; requires tensor is an eager Tensor."""
  # Coerce `shape` to an eager tensor (int32 by default) and note its dtype.
  shape_dtype, (shape,) = execute.args_to_matching_eager(
      [shape], ctx, dtypes.int32)
  op_attrs = ("T", tensor.dtype.as_datatype_enum,
              "Tshape", shape_dtype.as_datatype_enum)
  reshaped, = execute.execute(
      b"Reshape", 1, inputs=[tensor, shape], attrs=op_attrs, ctx=ctx)
  return reshaped
def _eager_fill(dims, value, ctx):
  """Eager-only version of Fill op; requires value is an eager Tensor."""
  # Dims must themselves be an eager int32 tensor.
  dims_t = convert_to_eager_tensor(dims, ctx, dtypes.int32)
  filled, = execute.execute(
      b"Fill", 1, inputs=[dims_t, value],
      attrs=("T", value.dtype.as_datatype_enum), ctx=ctx)
  return filled
def _eager_identity(tensor, ctx):
  """Eager-only version of Identity op; requires tensor is an eager Tensor."""
  out, = execute.execute(
      b"Identity", 1, inputs=[tensor],
      attrs=("T", tensor.dtype.as_datatype_enum), ctx=ctx)
  return out
def convert_to_eager_tensor(value, ctx, dtype=None):
  """Converts the given `value` to an `EagerTensor`.
  Note that this function could return cached copies of created constants for
  performance reasons.
  Args:
    value: value to convert to EagerTensor.
    ctx: value of context.context().
    dtype: optional desired dtype of the converted EagerTensor.
  Returns:
    EagerTensor created from value.
  Raises:
    TypeError: if `dtype` is not compatible with the type of t.
  """
  if isinstance(value, ops.EagerTensor):
    # Already eager: only check dtype compatibility, never copy.
    if dtype is not None and value.dtype != dtype:
      raise TypeError("Expected tensor with type %r not %r" % (
          dtype, value.dtype))
    return value
  if dtype is not None:
    # The EagerTensor constructor takes the enum form of the dtype.
    dtype = dtype.as_datatype_enum
  device = ctx.device_name
  handle = ctx._handle  # pylint: disable=protected-access
  if isinstance(value, (float,) + six.integer_types):
    # Use a scalar cache. This will put each scalar of each type only once on
    # each device. Scalars don't use much device memory but copying scalars can
    # trigger memcpys which are slow.
    cache_key = device, value, dtype, type(value)
    scalar_cache = ctx.scalar_cache()
    tensor = scalar_cache.get(cache_key, None)
    if tensor is not None:
      return tensor
    t = ops.EagerTensor(value, context=handle, device=device, dtype=dtype)
    scalar_cache[cache_key] = t
    return t
  else:
    return ops.EagerTensor(value, context=handle, device=device, dtype=dtype)
def constant(value, dtype=None, shape=None, name="Const", verify_shape=False):
  """Creates a constant tensor.
  The resulting tensor is populated with values of type `dtype`, as
  specified by arguments `value` and (optionally) `shape` (see examples
  below).
  The argument `value` can be a constant value, or a list of values of type
  `dtype`. If `value` is a list, then the length of the list must be less
  than or equal to the number of elements implied by the `shape` argument (if
  specified). In the case where the list length is less than the number of
  elements specified by `shape`, the last element in the list will be used
  to fill the remaining entries.
  The argument `shape` is optional. If present, it specifies the dimensions of
  the resulting tensor. If not present, the shape of `value` is used.
  If the argument `dtype` is not specified, then the type is inferred from
  the type of `value`.
  For example:
  ```python
  # Constant 1-D Tensor populated with value list.
  tensor = tf.constant([1, 2, 3, 4, 5, 6, 7]) => [1 2 3 4 5 6 7]
  # Constant 2-D tensor populated with scalar value -1.
  tensor = tf.constant(-1.0, shape=[2, 3]) => [[-1. -1. -1.]
                                               [-1. -1. -1.]]
  ```
  Args:
    value: A constant value (or list) of output type `dtype`.
    dtype: The type of the elements of the resulting tensor.
    shape: Optional dimensions of resulting tensor.
    name: Optional name for the tensor.
    verify_shape: Boolean that enables verification of a shape of values.
  Returns:
    A Constant Tensor.
  Raises:
    TypeError: if shape is incorrectly specified or unsupported.
  """
  ctx = context.context()
  if not ctx.in_graph_mode():
    # Eager path: build an EagerTensor, then coerce it to `shape` if given.
    t = convert_to_eager_tensor(value, ctx, dtype)
    if shape is None:
      return t
    shape = tensor_shape.as_shape(shape)
    if shape == t.shape:
      return t
    if verify_shape:
      raise TypeError("Expected Tensor's shape: %s, got %s." % (tuple(shape),
                                                                tuple(t.shape)))
    num_t = t.shape.num_elements()
    # TODO(josh11b): Implement shape -> eager tensor conversion.
    if num_t == shape.num_elements():
      return _eager_reshape(t, shape.as_list(), ctx)
    if num_t == 1:
      # Single value: broadcast it with Fill.
      if t.dtype == dtypes.bool:
        # We don't have a Fill kernel for bool dtype on GPU. So we first run
        # Fill on CPU and then copy to GPU if needed.
        with ops.device("/device:CPU:0"):
          x = _eager_fill(shape.as_list(), t.cpu(), ctx)
        return _eager_identity(x, ctx)
      else:
        return _eager_fill(shape.as_list(), t, ctx)
    raise TypeError("Eager execution of tf.constant with unsupported shape "
                    "(value has %d elements, shape is %s with %d elements)." %
                    (num_t, shape, shape.num_elements()))
  # Graph path: embed the value as a TensorProto attr on a Const op.
  g = ops.get_default_graph()
  tensor_value = attr_value_pb2.AttrValue()
  tensor_value.tensor.CopyFrom(
      tensor_util.make_tensor_proto(
          value, dtype=dtype, shape=shape, verify_shape=verify_shape))
  dtype_value = attr_value_pb2.AttrValue(type=tensor_value.tensor.dtype)
  const_tensor = g.create_op(
      "Const", [], [dtype_value.type],
      attrs={"value": tensor_value,
             "dtype": dtype_value},
      name=name).outputs[0]
  return const_tensor
def is_constant(tensor_or_op):
  """Return True if `tensor_or_op` is (or is produced by) a Const op."""
  op = tensor_or_op.op if isinstance(tensor_or_op, ops.Tensor) else tensor_or_op
  return op.type == "Const"
def _constant_tensor_conversion_function(v, dtype=None, name=None,
                                         as_ref=False):
  """Tensor-conversion hook that builds a constant from `v`."""
  del as_ref  # not meaningful for constants
  return constant(v, dtype=dtype, name=name)
# Register constant conversion for the common Python/NumPy containers; the
# catch-all `object` registration runs at lower priority (higher number).
ops.register_tensor_conversion_function(
    (list, tuple), _constant_tensor_conversion_function, 100)
ops.register_tensor_conversion_function(
    np.ndarray, _constant_tensor_conversion_function, 100)
ops.register_tensor_conversion_function(
    np.generic, _constant_tensor_conversion_function, 100)
ops.register_tensor_conversion_function(
    object, _constant_tensor_conversion_function, 200)
def _tensor_shape_tensor_conversion_function(s,
                                             dtype=None,
                                             name=None,
                                             as_ref=False):
  """Function to convert TensorShape to Tensor."""
  _ = as_ref
  if not s.is_fully_defined():
    raise ValueError(
        "Cannot convert a partially known TensorShape to a Tensor: %s" % s)
  s_list = s.as_list()
  # Any dimension that overflows int32 forces an int64 result.
  int64_value = next((dim for dim in s_list if dim >= 2**31), 0)
  if dtype is None:
    dtype = dtypes.int64 if int64_value else dtypes.int32
  else:
    if dtype not in (dtypes.int32, dtypes.int64):
      raise TypeError("Cannot convert a TensorShape to dtype: %s" % dtype)
    if dtype == dtypes.int32 and int64_value:
      raise ValueError("Cannot convert a TensorShape to dtype int32; "
                       "a dimension is too large (%s)" % int64_value)
  if name is None:
    name = "shape_as_tensor"
  return constant(s_list, dtype=dtype, name=name)
ops.register_tensor_conversion_function(
    tensor_shape.TensorShape, _tensor_shape_tensor_conversion_function, 100)
def _dimension_tensor_conversion_function(d,
                                          dtype=None,
                                          name=None,
                                          as_ref=False):
    """Function to convert Dimension to Tensor.

    The dimension must be known (`d.value is not None`).  Only int32 and
    int64 are valid target dtypes; the default is int32.
    """
    _ = as_ref
    if d.value is None:
        raise ValueError("Cannot convert an unknown Dimension to a Tensor: %s" % d)
    if dtype is not None:
        if dtype not in (dtypes.int32, dtypes.int64):
            # Bug fix: the message previously said "TensorShape" (copy-paste
            # from the TensorShape converter above) for a Dimension input.
            raise TypeError("Cannot convert a Dimension to dtype: %s" % dtype)
    else:
        dtype = dtypes.int32
    if name is None:
        # Kept identical to the TensorShape converter's default name so the
        # op-name scheme callers may rely on is unchanged.
        name = "shape_as_tensor"
    return constant(d.value, dtype=dtype, name=name)
# Known Dimensions convert to scalar integer constants.
ops.register_tensor_conversion_function(
    tensor_shape.Dimension, _dimension_tensor_conversion_function, 100)
|
|
import json
import urllib
from hashlib import md5
from django.contrib import messages
from django.conf import settings
from django.core.urlresolvers import reverse, NoReverseMatch
from django.shortcuts import render
from django.http import (
HttpResponse,
HttpResponseRedirect,
HttpResponseNotFound,
HttpResponseBadRequest,
QueryDict,
)
from django.views.generic import View
from django.template.loader import render_to_string
from django.template import RequestContext, Context
from django.utils.safestring import mark_safe
from django.utils.functional import cached_property
from .utils import fuzzy_reverse, random_session_key
from .forms import BForm
# Bidirectional registries populated at urlconf time (via component_url):
# component_key <-> Component class, and page_key <-> Page class.
COMPONENT_KEYS = {'to_component_class': {}, 'from_component_class': {}}
PAGE_KEYS = {'to_page_class': {}, 'from_page_class': {}}
def should_load_partial_page(request):
    """
    Return True if this request should return a partial page, ie only
    renders of updated components
    Return False if this request should return a full page render
    In the usual case, ajax requests return partial pages, and non-ajax
    requests render full pages. However, in very rare cases we want to
    load a full page via ajax for use in a modal box or something like
    that. In which case, we will add the `force_full_page` parameter to
    the request.
    An example of this are modal solvables on any of the feed pages.
    """
    is_ajax = request.is_ajax()
    if not is_ajax and request.REQUEST.get('redirect_if_not_ajax'):
        # A redirect is coming anyway, so a full render would be wasted
        # work: report "partial" and skip it.
        return True
    return is_ajax and request.REQUEST.get('force_full_page', 'false') != 'true'
class StrippedRequestInfo(object):
    """Lightweight, picklable-ish snapshot of a request.

    `request_obj` may be a real request or another StrippedRequestInfo;
    the relevant attributes are copied either way, so these can be
    chained when components spawn sub-requests.
    """
    def __init__(self, request_obj, page_key, kwargs, POST=None, passive=False):
        self.page_key = page_key
        self.passive = passive
        self._kwargs = kwargs
        # Straight attribute copies from the source object.
        self.user = request_obj.user
        self.method = request_obj.method
        self.GET = request_obj.GET
        self.META = request_obj.META
        self.session = request_obj.session
        self.path = request_obj.path
        # These are bound methods on the source object; they are handed
        # over as-is and stay bound to the original object.
        self.is_ajax = request_obj.is_ajax
        self.is_secure = request_obj.is_secure
        self.is_execute_request = getattr(request_obj, 'is_execute_request', False)
        if hasattr(request_obj, 'LANGUAGE_CODE'):
            self.LANGUAGE_CODE = request_obj.LANGUAGE_CODE
        # Real requests expose get_full_path(); a StrippedRequestInfo
        # carries the precomputed full_path attribute instead.
        if hasattr(request_obj, 'get_full_path'):
            self.full_path = request_obj.get_full_path()
        else:
            self.full_path = request_obj.full_path
        # Non-passive infos must carry POST data -- note that an empty
        # dict is meaningful and distinct from None.
        assert self.passive or POST is not None
        if POST is not None:
            self.POST = POST
class ComponentBadRequestData(BaseException):
    """Raised when a request arrives without usable component data
    (e.g. an invalid or missing page_key).

    NOTE(review): inherits from BaseException, so generic
    ``except Exception`` handlers will NOT catch it -- presumably
    intentional so it reaches a top-level handler; confirm.
    """
    pass
class ComponentError(BaseException):
    """Programming error in component-framework usage (mis-registered
    component, component added after guards ran, etc.).

    NOTE(review): inherits from BaseException, so generic
    ``except Exception`` handlers will NOT catch it -- presumably
    intentional; confirm before changing.
    """
    pass
class ObjectCache(object):
    """Tiny per-request memoization cache shared between components.

    Values are computed lazily via __call__ and may be pre-seeded through
    the constructor or set().  None values in the initial dict are dropped.
    """
    def __init__(self, init=None):
        self.data = {}
        # Seed from `init`, skipping None entries.
        for key, val in (init or {}).items():
            if val is not None:
                self.data[key] = val

    def __call__(self, key, func):
        # Compute-and-store on first access; plain lookup afterwards.
        if key not in self.data:
            self.data[key] = func()
        return self.data[key]

    def reset(self, key):
        # Drop a cached entry; silently ignore keys never cached.
        self.data.pop(key, None)

    def set(self, key, val):
        """
        Like __call__, but doesn't take a function.
        Useful if you already have the object (example: when it's initially created)
        """
        self.data[key] = val
        return val

    @staticmethod
    def get_key_for_child_component(raw_key, kwargs):
        # Child components with explicit kwargs get their own namespaced
        # cache key, derived from a digest of the sorted kwargs.
        digest = md5(unicode(sorted((kwargs or {}).items()))).hexdigest()
        return u"%s[%s]" % (raw_key, digest)
class AttributeDict(dict):
    """
    Subclass of dict to support attribute-based value assignment:
    > my_dict.foo_bar = "binbaz"
    > print my_dict.foo_bar
    binbaz
    """
    def __setattr__(self, key, val):
        # Refuse to shadow any name dict/AttributeDict already provides
        # (keys, items, update, ...).
        if key in ATTRIBUTE_DICT_RESTRICTED_ARGS:
            # Bug fix: the original never interpolated `key` into the
            # message (the "%s" was emitted literally).
            raise KeyError("'%s' is a restricted argument for AttributeDict" % key)
        self[key] = val

    def __getattr__(self, key):
        # Note: a missing key raises KeyError here, not AttributeError.
        return self[key]

# Every name already provided by the class; blocked in __setattr__ above.
ATTRIBUTE_DICT_RESTRICTED_ARGS = dir(AttributeDict)
class ComponentsRenderDict(dict):
    """
    dict subclass that records which component keys templates look up, and
    classifies failed lookups so the Page can raise a helpful error rather
    than a bare KeyError from inside a template.
    """
    def __init__(self, *args, **kwargs):
        super(ComponentsRenderDict, self).__init__(*args, **kwargs)
        # Registered components that were never added to this page.
        self.not_added_component_keys = []
        # Keys that don't correspond to any registered component.
        self.non_existent_component_keys = []
        # Every key successfully looked up, in access order.
        self.accessed_keys = []

    def __getitem__(self, key):
        try:
            value = super(ComponentsRenderDict, self).__getitem__(key)
        except KeyError:
            # Classify the failure before letting it propagate so the
            # page-level error handler can explain what went wrong.
            if key in COMPONENT_KEYS['to_component_class']:
                self.not_added_component_keys.append(key)
            else:
                self.non_existent_component_keys.append(key)
            raise
        self.accessed_keys.append(key)
        return value
class FrameworkBaseMixin(object):
    """Behaviour shared by Component, Page and ComponentView."""

    def check_guard(self, component):
        """Merge `component`'s guard failure into our own.

        An existing failure is only replaced when the incoming one is
        strictly "worse": ours doesn't force a redirect but the
        component's does.
        """
        if not self.guard_fail:
            self.guard_fail = component.guard_fail
            return
        incoming = component.guard_fail
        if (incoming and incoming['always_redirect']
                and not self.guard_fail['always_redirect']):
            self.guard_fail = incoming

    @property
    def is_moderator(self):
        # Active staff, or active users flagged as discussion moderators.
        user = self.user
        if not user.is_active:
            return False
        return user.is_staff or user.profile.flags.is_discussions_moderator

    def get_param_key(self, component_key, kwargs):
        # md5 digest of the component's parameterized URL.
        url = reverse(component_key, kwargs=kwargs)
        return md5(url).hexdigest()
class Component(FrameworkBaseMixin):
    """
    A `Component` represents a chunk of content on our site and allows for
    advanced manipulation and interaction of the content in a structured and
    standardized way.
    `Component`s each have their own url specified by a `component_url`, but
    you can't point a browser to a `Component`'s url unless the `component_url`
    is also given a `PageClass`.
    `Component`s who don't have `PageClass`es can still be used in a variety
    of ways:
        1) They can be added as a *secondary component* to a `Page` using the
           `add_component` method on `Page`.
        2) If used on a `Page` with any other `Component` that has a `handler`
           method that `Component` can add another `Component` as a
           *dependent component* by using the `add_dependent_component`
           method on `Component` (Note, you must `guard_dependent_component`
           first)
        3) They can be added as a *child component* to another `Component`
           by using the `add_child_component` method on `Component`
    Note that any `Component` is basically the same as any other `Component`
    and the only thing that determines how they can be used is how you use them.
    For example, it is possible to use a component as both a secondary
    component and a child component on two different pages.
    Independent of all of above ways `Component`s can be used, you can also
    alter the behaviour:
        1) The main way being converting it into a *deferred component*
           which can simply be done by setting the `deferred` attribute
           to True on the `Component`. This makes it so that where possible,
           the *Component Framework* will not render the `Component` in the
           same request as any other `Component`s and will instead have the
           *Component Framework* javascript dispatch a request to grab the
           contexts of that `Component` independently. This is typically
           done for `Component` that have poor performance and aren't the
           primary information that the user wants to look at when they end up
           on the page.
        2) A more advanced thing is to use `override_page_key` which is only
           used if you are working on a `Component` that has a `PageClass`
           attached but is used on other `Page`s also. What it does is make
           it so that if you POST to the `Component` from another `Page`
           it will ignore the other page and instead use the guards/page
           template of its own `Page` where applicable (for instance if there
           are form errors on a non-ajax POST and the `Page` needs to be
           rerendered with the error messages shown).
    """
    ###########
    # attributes to set/change to Customize your Component
    ###########
    template_name = None
    # Set to True to defer a component.
    deferred = False
    # Set to True if POSTing to this component should use this Component's
    # page_key (aka component_key) rather than the page_key passed in through
    # the form.
    override_page_key = False

    def __init__(self, request_info, obj_cache, response_message=None, guard_only=False, param_key=None):
        self.component_key = self.get_component_key()
        self.request_info = request_info
        self.user = request_info.user
        self.kwargs = request_info._kwargs
        # Passive (read-only) copy of the request, handed to dependent
        # components so they can never POST.
        self.dependent_request_info = StrippedRequestInfo(self.request_info,
                                                          self.request_info.page_key,
                                                          self.request_info._kwargs,
                                                          passive=True)
        self.ctx = AttributeDict()
        self.response_message = response_message or {}
        self.extra_response_headers = {}
        # This stands for 'parameterized key', it consists of an md5
        # of the url of the component with url arguments
        # included. It's for components where we need to specify kwargs
        # rather than just using the ones from the original request.
        self.param_key = param_key
        self.blank = False
        self.dependent_components = []
        self.dependent_component_classes = []
        self.guarded_dependent_component_classes = []
        self.child_component_classes = []
        self.child_components = []
        self.obj_cache = obj_cache
        self.run_guards()
        # Only initialize when the guards passed, a full component was
        # requested, and rendering isn't being deferred to a later request.
        if not self.guard_fail and not guard_only and not self.defer_this_request(request_info):
            self.init()

    ###########
    # Methods to override to customize your component
    ###########
    def is_deferred(self):
        """
        The way the framework user defers a component conditionally.
        This method is only called once per component by the framework.
        """
        return self.deferred

    def guard(self):
        """
        This method "guards" the component from being run in a situation
        that it shouldn't be run for.
        See `guard_active_user` for an example of what to return.
        Returns `None` when there were no failures.
        """
        pass
    # Marker consumed by has_guard(): overriding `guard` removes it.
    guard.original = True

    def init(self):
        """
        Use `init` to initialize variables for use in the handler and
        the template context.
        If you just need something for the component's other methods, you
        can store stuff to `self` (`self.my_variable = True`)
        If you need something for the template (and possibly the component)
        you can store it to `self.ctx` (`self.ctx.my_variable = True`)
        """
        pass

    def handler(self, request):
        """
        This method should contain the actions that happen given a POST
        request to this component's url.
        It should return True if the POST succeeded, and None if the POST
        form validation failed. Can also return an HttpResponseRedirect
        for the view to return if you have a special case.
        You can also `self.add_dependent_component` here,
        and also alter `self.ctx` as desired.
        """
        pass

    def final(self):
        """
        Runs after init and handler. This is to do any calculations and
        set any template context that should come after handler but needs
        to happen even when handler isn't run.
        """
        pass

    def login_redirect_next_url(self, include_get=False):
        """
        After logging in, what is the URL the user should be forwarded
        to? This is meant to be overridden by certain components,
        particularly ones whose kwargs don't match up with the Page's.
        """
        get_string = ''
        if include_get:
            get_string = '?%s' % self.request_info.GET.urlencode()
        try:
            return "%s%s" % (fuzzy_reverse(self.request_info.page_key,
                                           kwargs=self.kwargs),
                             get_string)
        except NoReverseMatch:
            raise BaseException('Page required kwargs not used in the Component')

    ###########
    # Methods you can call from your components
    ###########
    def is_post(self):
        # Passive components never handle POSTs, regardless of the method.
        return not self.request_info.passive and self.request_info.method == "POST"

    def guard_dependent_component(self, DependentComponentClass):
        """
        This method should be called from a component's `guard` method only
        when the component is NOT passive to guard Components who will
        possibly be added as dependent components in this request.
        Example:
            def guard(self):
                guard_response = self.guard_active_user()
                if (not self.request_info.passive) and guard_response is None:
                    self.guard_dependent_component(PointsComponent)
                return guard_response
        """
        if self.request_info.passive:
            raise ComponentError("Passive components shouldn't guard dependent components. "
                                 "This prevents infinite loops.")
        else:
            self.guarded_dependent_component_classes.append(DependentComponentClass)

    def add_dependent_component(self, DependentComponentClass):
        """
        Use this method to tell the framework that a component has content
        that may have changed based on the handler of the `self` component.
        Dependent components should be passive (read-only).
        Note: Make sure the component is properly guarded with
        `guard_dependent_component` first.
        """
        if (DependentComponentClass not in self.guarded_dependent_component_classes
                and DependentComponentClass.has_guard()):
            raise ComponentError(
                "Tried adding %s as a dependent component for %s, "
                "without guarding it first (and it has a guard defined)." %
                (DependentComponentClass.get_component_key(), self.component_key)
            )
            # Bug fix: a dead `return` statement that followed this raise
            # (unreachable code) has been removed; behavior is unchanged.
        if self.request_info.passive:
            raise ComponentError("You shouldn't add a dependent component if you're passive. "
                                 "This prevents infinite loops.")
        else:
            self.dependent_component_classes.append(DependentComponentClass)

    def add_child_component(self, ChildComponentClass, kwargs=None, obj_cache_init=None):
        """
        Adds the child component, and initializes the obj_cache keys for
        this child component's particular set of args/kwargs
        """
        if obj_cache_init:
            for raw_key, val in obj_cache_init.iteritems():
                child_specific_key = self.obj_cache.get_key_for_child_component(raw_key, kwargs)
                self.obj_cache.set(child_specific_key, val)
        self.child_component_classes.append((ChildComponentClass, kwargs))

    def form_init(self):
        """
        When you create a form with BForm (which is standard practice) it
        requires 'page_key' be added to the initialization
        of the form so that they can be added as hidden fields.
        Example:
            self.ctx.form = BForm(**self.form_init())
        """
        if not hasattr(self, '_form_init_cached'):
            self._form_init_cached = {'page_key': self.request_info.page_key}
            if self.param_key is not None:
                self._form_init_cached['param_key'] = self.param_key
        return self._form_init_cached

    def set_message(self, message_type, message_text=''):
        # Stashes a user-visible message to be included in the response.
        self.response_message['message_type'] = message_type
        self.response_message['message_text'] = message_text

    def this_url(self):
        return self.component_reverse(kwargs=self.kwargs)

    @classmethod
    def component_reverse(cls, kwargs=None):
        kwargs = kwargs or {}
        return reverse(cls.get_component_key(), kwargs=kwargs)

    def this_url_fuzzy(self):
        return self.fuzzy_component_reverse(kwargs=self.kwargs)

    @classmethod
    def fuzzy_component_reverse(cls, kwargs=None):
        kwargs = kwargs or {}
        return fuzzy_reverse(cls.get_component_key(), kwargs=kwargs)

    @classmethod
    def get_component_key(cls):
        return COMPONENT_KEYS['from_component_class'][cls]

    # Guards that can be used in other guards
    def guard_active_user(self, include_get=False):
        """
        Example:
        def guard(self):
            return self.guard_active_user()
        """
        if not self.user.is_active:
            return self.login_redirect_dict(include_get=include_get)

    def guard_active_user_on_post(self):
        if self.is_post():
            return self.guard_active_user()

    def guard_staff_user(self):
        if not self.user.is_staff:
            return self.login_redirect_dict(str_error='You must be Staff to do that')

    ### Guard helper functions
    def login_redirect_url(self, include_get=False):
        login_url = getattr(settings, 'LOGIN_URL', '/')
        next_url = self.login_redirect_next_url(include_get=include_get)
        if next_url:
            return "%s?next=%s" % (login_url, urllib.quote_plus(next_url))
        else:
            return login_url

    def login_redirect_dict(self,
                            str_error='You must be logged in to view that page',
                            include_get=False):
        return {'str_error': str_error,
                'redirect_url': self.login_redirect_url(include_get=include_get),
                'always_redirect': True}

    ###########
    # Internal methods
    ###########
    @cached_property
    def component_is_deferred(self):
        """
        The way the framework checks if a component is deferred.
        """
        # bool to make it safe to return None
        return bool(self.is_deferred())

    def run_guards(self):
        self.guard_fail = self.guard()
        # guard() may have registered dependent components; run their guards
        # too (guard_only avoids their init) and keep the "worst" failure.
        for ComponentClass in self.guarded_dependent_component_classes:
            component = ComponentClass(self.dependent_request_info, self.obj_cache, guard_only=True)
            self.check_guard(component)

    def get_response_action_tuple(self, request):
        return ((self.param_key or self.component_key), self.response_action_dict(request))

    def response_action_dict(self, request):
        return {
            'new_html': self.render(request),
            'component_key': self.component_key
        }

    def _get_context(self, request):
        final_context = self.ctx
        message_type = self.response_message.get('message_type')
        message_text = self.response_message.get('message_text')
        if message_type is not None:
            final_context['message_type'] = message_type
        if message_text is not None:
            final_context['message_text'] = message_text
        final_context['request_info'] = self.request_info
        final_context['components'] = ComponentsRenderDict(self.render_child_components(request))
        final_context['component_info'] = self._get_component_info()
        return final_context

    def _get_component_info(self):
        """
        Used in the template context for the component and also for
        the deferred template message.
        """
        return {
            'url': lambda: fuzzy_reverse(self.component_key, kwargs=self.kwargs),
            'page_url': lambda: fuzzy_reverse(self.request_info.page_key, kwargs=self.kwargs),
            'component_key': self.component_key,
            'param_key': self.param_key,
            'page_key': self.request_info.page_key,
        }

    def render_debug_extra(self):
        return mark_safe("""<div class="debug_component_info" style="display:none">
        component_key: %s; template_name: %s
        </div>""" % (self.component_key, self.template_name))

    def render(self, request, is_child=False):
        if self.blank:
            render_output = self._render_blank(request)
        elif self.defer_this_request(request, is_child):
            render_output = self._render_deferred(request)
        else:
            render_output = self._render(request)
        if getattr(settings, 'DEBUG', False) and getattr(settings, 'COMPONENT_DEBUG_INFO', True):
            render_output = self.render_debug_extra() + render_output
        return render_output

    def _render(self, request):
        context = RequestContext(request, self._get_context(request))
        return render_to_string(self.template_name, context_instance=context)

    def _render_deferred(self, request):
        uastr = request.META.get('HTTP_USER_AGENT', '').lower()
        # don't show the no-js warning to search bots -- they see it 5 times
        # on a page and think it's important
        bots = (
            'googlebot', 'mediapartners', 'adsbot',  # google
            'bingbot', 'adidxbot', 'msnbot', 'bingpreview',  # bing, yahoo
        )
        search_bot = any(bot in uastr for bot in bots)
        context = Context({
            'component_info': self._get_component_info(),
            'get_params': request.META.get('QUERY_STRING', '').replace('force_full_page=true', '_=_'),
            'search_bot': search_bot,
        })
        return render_to_string('includes/defer_loading.html', context_instance=context)

    def _render_blank(self, request):
        """Render blank (or show debug info) if a component
        fails to be showable. This is usually an error state."""
        debug_str = "No render available for %s - %s - %s" % (
            self.component_key, self.param_key, self.request_info._kwargs
        )
        if getattr(settings, 'DEBUG', False):
            return debug_str
        if not self.user.is_staff:
            # Staff may see blank things that are incomplete
            raise ComponentError(debug_str)
        return ""

    def render_child_components(self, request):
        child_renders = []
        for child in self.child_components:
            key = child.param_key or child.component_key
            child_renders.append((key, child.render(request, is_child=True)))
        return child_renders

    def run_handler(self, request):
        return self.handler(request)

    def defer_this_request(self, request, is_child=False):
        if not self.component_is_deferred:
            return False
        if is_child and getattr(self, 'defer_as_child', False):
            return True
        # Not deferred if this component is being POSTed to or is no_js
        # request or deferred is set.
        # Note: The frontend sends `deferred=true` when it is requesting
        # the content for a previously deferred component
        return (not self.is_post()
                and not request.GET.get('no_js', False)
                and not request.GET.get('deferred') == 'true')

    def init_child_components(self, request_info):
        """
        Initialize child components. This must happen after this
        component's init and before render.
        The idea is that this function may be called from Page or
        ComponentView to recursively elaborate all child components
        """
        for ComponentClass, kwargs in self.child_component_classes:
            component_key = ComponentClass.get_component_key()
            if kwargs is not None:
                param_key = self.get_param_key(component_key, kwargs)
            else:
                kwargs = request_info._kwargs
                param_key = None
            # We don't want to accidentally post to one of the children here.
            # NOTE(review): this rebinds `request_info`, so later iterations
            # build from the previous child's stripped copy -- the copied
            # attribute values are identical, but confirm before refactoring.
            request_info = StrippedRequestInfo(request_info, request_info.page_key,
                                               kwargs, passive=True)
            component = ComponentClass(request_info, self.obj_cache, param_key=param_key)
            if component.guard_fail:
                component.blank = True
            elif not component.defer_this_request(request_info, is_child=True):
                component.final()
                component.init_child_components(request_info)
            self.child_components.append(component)

    def init_dependent_components(self, request):
        """
        Initialize dependent components that have been added via
        the handler. This is deferred to make sure it runs after both
        the handler has done any relevant updates on data in obj_cache.
        """
        if should_load_partial_page(request):
            for ComponentClass in self.dependent_component_classes:
                new_component = ComponentClass(self.dependent_request_info, self.obj_cache)
                new_component.final()
                new_component.init_child_components(self.request_info)
                self.dependent_components.append(new_component)

    @classmethod
    def has_guard(cls):
        # True iff the subclass overrode guard() (the base marker is gone).
        return not getattr(cls.guard, 'original', False)
class BasicFormComponent(Component):
    """
    For components that handle a basic form
    You can extend form_class if you want and even use a form that takes extra
    kwargs using `extra_form_init_kwargs`
    NOTE: instead of `init` use `final` or make sure you include `super`
    Similarly, use `success_handler` instead of `handler`
    The template will get `form` initially and on form validation failure (and
    also if you don't return True or HttpResponseRedirect from `success_handler`)
    and also it will have `this_url` for POSTing to.
    """
    form_class = BForm

    def extra_form_init_kwargs(self):
        """
        Extend this if you want to use a form that requires extra initialization
        """
        return {}

    def _get_kwargs(self):
        # NOTE: form_init() returns its cached dict, so this update
        # deliberately mutates that cache (matching historical behavior).
        kwargs = self.form_init()
        kwargs.update(self.extra_form_init_kwargs())
        return kwargs

    def extra_init(self):
        pass

    def init(self):
        self.extra_init()
        self.ctx.this_url = self.this_url_fuzzy()
        if self.is_post():
            return  # handler() will build the bound form instead
        self.ctx.form = self.form_class(**self._get_kwargs())

    def success_handler(self, request):
        raise NotImplementedError('Please implement a success_handler(self, request)')

    def handler(self, request):
        form = self.form_class(request.POST, request.FILES, **self._get_kwargs())
        self.ctx.form = form
        if form.is_valid():
            return self.success_handler(request)
class Page(FrameworkBaseMixin):
    """
    `Component`s define subsections of html on a page. The renderings are
    typically displayed inside of a div tag. `Component`s do not deal with
    items such as the header and footer, typically those elements are part
    of the `Page`s template.
    `Page`s only exist for urls that will be rendered synchronously.
    urls that exist only to support ajax loading don't have `Page`s attached
    to them. You attach a `Page` to a `Component` (and thus give it a url that
    can be navigated to) by adding it using the `PageClass` keyword argument
    on the `Component`'s `component_url` definition in a urls file.
    Note: Both `Page`'s and `Component`'s don't use traditional `url`
    definition methods, they both need `component_url`.
    If we consider a django view, which is referenced by a url and renders
    a single main template as a single part, the ComponentFramework renders
    multiple parts separately:
        1) a primary component referenced in the url spec
        2) ancillary components specified by the page or by other components
        3) the page, also referenced in the url spec; glues everything together
    `Page`s should not depend on changes in the request, if you want something
    to change, it should be made into another Component so that it can be
    updated using `add_dependent_component`.
    Similarly, you can't `POST` to a page.
    """
    # Template rendered by the page (components render into it).
    template_name = None
    def __init__(self, obj_cache, component=None, request_info=None,
                 response_message=None, guard_only=False, **kwargs):
        # The else can probably never happen anymore; get_page always
        # acts on a component
        if component:
            self.request_info = component.request_info
        else:
            self.request_info = request_info
        self.user = self.request_info.user
        self.kwargs = self.request_info._kwargs
        # The key of the Page's principal component. This may be different
        # from component.component_key if, for instance, a ancillary component
        # is being posted to.
        self.page_key = PAGE_KEYS['from_page_class'][self.__class__]
        # Passive request copy handed to every component this page creates,
        # so none of them can accidentally observe the POST.
        self.new_component_request_info = StrippedRequestInfo(
            self.request_info, self.request_info.page_key,
            self.request_info._kwargs, passive=True)
        self.response_message = response_message or {}
        self.obj_cache = obj_cache
        self.ctx = AttributeDict()
        self.guard_only = guard_only
        # Once guards are done, add_component refuses new components.
        self.guard_done = False
        # NOTE: ordering matters -- components must exist before run_guards,
        # and init only runs when every guard passed.
        self.set_components_full(requested_component=component)
        self.run_guards()
        if not self.guard_only:
            if self.guard_fail:
                raise ComponentError("By the time non-guard_only Page comes around, "
                                     "Page should have passed all guards.")
            self.guard_done = True
            self.init()
            self.components_render_dict = ComponentsRenderDict()
    ##########
    # Methods you can override in your `Page` class
    ##########
    def set_components(self):
        """
        Override set_components to add secondary components to the page
        using the `add_component` method.
        """
        pass
    def init(self):
        """
        Same as Component.init basically.
        """
        pass
    def get_page_context(self):
        """
        Add variables to the template context.
        NOTE: Deprecated, use self.ctx.template_var_name = value in `init`.
        """
        return {}
    ##########
    # Methods to call from your `Page` class methods:
    ##########
    def add_component(self, NewComponentClass, kwargs=None):
        """
        This takes a component class, initializes it, and adds the
        initialized object to self.components, also adding the class
        to self.component_classes
        Add the component iff it has not already been added.
        """
        if self.guard_done:
            raise ComponentError("You should add components in set_components, not init")
        if NewComponentClass not in COMPONENT_KEYS['from_component_class']:
            raise ComponentError("%s not registered (via urls.py)" % NewComponentClass.__name__)
        new_component_key = COMPONENT_KEYS['from_component_class'][NewComponentClass]
        # Components with explicit kwargs are keyed by an md5 of their
        # parameterized URL so the same class can appear more than once.
        if kwargs is not None:
            lookup_key = md5(reverse(new_component_key, kwargs=kwargs)).hexdigest()
        else:
            lookup_key = new_component_key
        if lookup_key in self.components:
            return
        # Only forward the response message to the component it belongs to.
        if self.response_message.get('component_key') == new_component_key:
            component_response_message = self.response_message
        else:
            component_response_message = {}
        if kwargs is not None:
            param_key = lookup_key
            ncri = self.new_component_request_info
            request_info = StrippedRequestInfo(
                ncri, ncri.page_key, kwargs,
                POST=None, passive=ncri.passive)
        else:
            param_key = None
            request_info = self.new_component_request_info
        new_component = NewComponentClass(
            request_info, self.obj_cache,
            response_message=component_response_message,
            guard_only=self.guard_only, param_key=param_key)
        # Skip final/children when only guarding or when render is deferred.
        if not self.guard_only and not new_component.defer_this_request(self.request_info):
            new_component.final()
            new_component.init_child_components(self.request_info)
        self.components[lookup_key] = new_component
        self.component_classes[lookup_key] = NewComponentClass
    def this_url(self):
        return self.page_reverse(kwargs=self.kwargs)
    @classmethod
    def page_reverse(cls, kwargs=None):
        kwargs = kwargs or {}
        page_key = PAGE_KEYS['from_page_class'][cls]
        return reverse(page_key, kwargs=kwargs)
    ############
    # Internal methods:
    ############
    def set_components_full(self, requested_component):
        """
        Adds all the components for the Page, including those explicitly
        specified by the derived class, and also some added by default.
        This is not designed for overriding.
        """
        self.components = {}
        self.component_classes = {}
        # The component being requested/POSTed to is pre-registered so
        # add_component won't build a second instance of it.
        if requested_component:
            self.components[requested_component.component_key] = requested_component
            self.component_classes[requested_component.component_key] = requested_component.__class__
        self.set_components()
        # always add the page's primary component by default, if it hasn't been added
        primary_component_class = COMPONENT_KEYS['to_component_class'][self.page_key]
        self.add_component(primary_component_class)
    def _get_context(self, request):
        final_context = self.ctx
        final_context.update(self.get_page_context())
        final_context['components'] = self.components_render_dict
        final_context['has_component'] = dict.fromkeys(self.components.keys(), True)
        for key, component in self.components.items():
            final_context['components'][key] = component.render(request)
        final_context['request_info'] = self.request_info
        return final_context
    def run_guards(self):
        # Pages have no guard of their own; the "worst" component guard
        # failure wins (see FrameworkBaseMixin.check_guard).
        self.guard_fail = None
        for component in self.components.itervalues():
            self.check_guard(component)
    @staticmethod
    def _add_component_class_name(component_key):
        # "ClassName (component_key)" for readable error messages.
        return "%s (%s)" % (COMPONENT_KEYS['to_component_class'][component_key].__name__,
                            component_key)
    def handle_component_key_errors(self):
        # Raise helpful errors for template lookups recorded by
        # ComponentsRenderDict during rendering.
        if self.components_render_dict.non_existent_component_keys:
            msg = ("Undefined component key(s): %s \n\n Make sure you add them in urlconf "
                   "or check your spelling. Available ones are: \n\n %s"
                   % ("\n".join(self.components_render_dict.non_existent_component_keys),
                      "\n".join(map(self._add_component_class_name,
                                    COMPONENT_KEYS['to_component_class'].keys()))))
            raise ComponentError(msg)
        if self.components_render_dict.not_added_component_keys:
            msg = ("Attempting to display unprocessed component(s): %s. \n\n "
                   "Make sure you add them in set_components of your Page"
                   % ("\n".join(map(self._add_component_class_name,
                                    self.components_render_dict.not_added_component_keys))))
            raise ComponentError(msg)
        # In DEBUG, also complain about components added but never rendered.
        if (getattr(settings, 'DEBUG', False)
                and not getattr(settings, 'SKIP_UNUSED_COMPONENTS_EXCEPTION', False)):
            unused_components = []
            for key, component in self.components.items():
                if key not in self.components_render_dict.accessed_keys:
                    unused_components.append(component.component_key)
            if unused_components:
                msg = ("You added the following components which were not used "
                       "in the template for this request:\n\n%s"
                       % "\n".join(map(self._add_component_class_name, unused_components)))
                raise ComponentError(msg)
def get_page_key(request, component_class):
    """Determine which registered Page this request belongs to.

    Returns the page_key string, or None when the key is invalid and the
    caller should 404 without raising (Googlebot traffic).
    """
    page_key = request.REQUEST.get('page_key')
    if isinstance(page_key, list):
        page_key = page_key[0]
    # A plain full-page GET/HEAD carries no page_key; in that case (or when
    # the component insists on its own page) fall back to the component's key.
    is_main_component = ((not page_key)
                         and request.method in ['GET', 'HEAD']
                         and (not should_load_partial_page(request)))
    if is_main_component or component_class.override_page_key:
        # presuming this component is the main component of a page,
        # it should share the same key as the page
        page_key = component_class.get_component_key()
    if page_key in PAGE_KEYS['to_page_class']:
        return page_key
    if "Googlebot" not in request.META.get('HTTP_USER_AGENT', ''):
        raise ComponentBadRequestData("Didn't get a valid page_key! got: %s at %s"
                                      % (page_key, request.path))
    # Signal to return 404 without the exception so DebugIssue gets
    # saved into the DB without a rollback. Also, perhaps we shouldn't
    # 404 for non-post + non-ajax cases? Or there still may be no choice
    # since we'd need the page key?
    return None
def get_page(request,
             obj_cache,
             component=None,
             component_class=None,
             request_info=None,
             response_message=None,
             guard_only=False):
    """Instantiate the Page owning this request.

    When `component` is given, its class and request_info take precedence
    over the explicitly passed ones.
    """
    if component:
        component_class = component.__class__
        request_info = component.request_info
    page_key = get_page_key(request, component_class)
    PageClass = PAGE_KEYS['to_page_class'][page_key]
    return PageClass(obj_cache,
                     component=component,
                     request_info=request_info,
                     response_message=response_message,
                     guard_only=guard_only)
class ComponentView(FrameworkBaseMixin, View):
    """
    The view that handles all components.
    Which component it handles is specified in URLConf.
    """
    # Needed so that as_view can accept it.
    component_key = None
    ComponentClass = None
    init_obj_cache = None
    def __init__(self, **initkwargs):
        # Default the object-cache seed so as_view callers may omit it.
        initkwargs['init_obj_cache'] = initkwargs.get('init_obj_cache', {})
        super(ComponentView, self).__init__(**initkwargs)
        # This should run on server startup, so no surprise 500s
        # NOTE(review): this guard fires when component_key is missing while
        # a ComponentClass WAS supplied, yet the message complains about a
        # missing ComponentClass -- the condition looks inverted; confirm
        # the intended check.
        if not initkwargs.get('component_key') and initkwargs.get('ComponentClass'):
            raise ValueError("ComponentView needs a ComponentClass passed into as_view")
        self.component_key = initkwargs['component_key']
        self.ComponentClass = initkwargs['ComponentClass']
    def _get_component(self, request, kwargs):
        """
        This instantiates the attached component class object, runs guards, and
        (if the request type is ajax) calls the function's init.
        It returns the a two-tuple consisting of the component, and a
        boolean indicating if the component guard has failed.
        """
        request_info = StrippedRequestInfo(request, self.page_key, kwargs, POST=request.POST)
        # Only hand the stored flash message to the component it was saved for.
        if self.response_message and self.response_message['component_key'] == self.component_key:
            response_message = self.response_message
        else:
            response_message = {}
        if should_load_partial_page(request):
            component = self.ComponentClass(request_info, self.obj_cache,
                                            response_message=response_message,
                                            param_key=request.REQUEST.get('param_key'))
            return component, component.guard_fail
        else:
            # Full-page request: run a cheap guard-only instantiation of the
            # component, then the page guard, before doing the full init.
            guard_component = self.ComponentClass(
                request_info, self.obj_cache, response_message=response_message,
                guard_only=True)
            if not getattr(guard_component, 'bypass_page_guard', False):
                guard_page = get_page(
                    self.request, self.obj_cache, component=guard_component,
                    response_message=self.response_message, guard_only=True)
                if guard_page.guard_fail:
                    return None, guard_page.guard_fail
            # Get an initialized component
            component = self.ComponentClass(request_info, self.obj_cache,
                                            response_message=response_message)
            return component, component.guard_fail
    def sanity_check(self, request):
        """
        Just some quick checks, so we don't redirect to external sites
        """
        if request.REQUEST.get('redirect_if_not_ajax'):
            if request.REQUEST['redirect_if_not_ajax'][0] != "/":
                # require relative links
                return False
            if request.REQUEST['redirect_if_not_ajax'][:2] == "//":
                # //someotherhost.com would work
                return False
        return True
    def _common_init(self, request, kwargs, grab_submit_success=False):
        # Shared GET/POST setup. Returns an HttpResponse to short-circuit
        # the request (bad redirect target, unknown page key, guard failure),
        # or None to let the caller continue. Side effects: sets
        # self.obj_cache, self.page_key, self.response_message, self.component.
        if not self.sanity_check(request):
            return HttpResponseBadRequest()
        self.obj_cache = ObjectCache(init=self.init_obj_cache)
        self.page_key = get_page_key(request, self.ComponentClass)
        if self.page_key is None:
            return HttpResponseNotFound(render_to_string('404.html'))
        self.response_message = None
        if grab_submit_success:
            # Pop the post-redirect success message that _get_http_response
            # stored in the session, so it is shown exactly once.
            submit_success = ((not should_load_partial_page(self.request))
                              and self.request.GET.get('submit_success'))
            if submit_success:
                self.response_message = self.request.session.get(
                    'success_data:' + submit_success)
                if self.response_message is not None:
                    del self.request.session['success_data:' + submit_success]
        self.component, guard_fail = self._get_component(request, kwargs)
        if guard_fail:
            return self._get_guard_fail_response(request, kwargs, guard_fail)
    def post(self, request, **kwargs):
        """
        Post should only ever target one component, either via ajax,
        or via feeding all the other components the request with the
        POST data stripped from it.
        The framework referes to idempotent methods as "passive" and
        the converse as "active", although the only idempotent action
        supported is GET and the only non-idempotent method is POST.
        """
        ret = self._common_init(request, kwargs)
        if ret is not None:
            return ret
        self.response_message = None
        # Run the handler. Get a response, if any.
        handler_result = self.component.run_handler(request)
        self.component.final()
        # Child/dependent components see a POST-stripped ("passive") request.
        passive_ri = StrippedRequestInfo(request, self.page_key, kwargs, passive=True)
        self.component.init_child_components(passive_ri)
        self.component.init_dependent_components(request)
        return self._get_http_response(handler_result, kwargs)
    def get(self, request, **kwargs):
        # GET entry point; also pulls any post-redirect success message
        # stored by a previous POST (grab_submit_success=True).
        ret = self._common_init(request, kwargs,
                                grab_submit_success=True)
        if ret is not None:
            return ret
        if not self.component.defer_this_request(request):
            self.component.final()
            passive_ri = StrippedRequestInfo(request, self.page_key, kwargs, passive=True)
            self.component.init_child_components(passive_ri)
        return self._get_http_response(None, kwargs)
    def _get_http_response(self, handler_result, component_kwargs):
        # Convert the handler's result into an HttpResponse, branching on
        # redirect-for-non-ajax, partial-page (ajax) and full-page cases.
        # Handle ajax vs non-ajax requests
        if not self.request.is_ajax() and self.request.REQUEST.get('redirect_if_not_ajax'):
            response = HttpResponseRedirect(self.request.REQUEST['redirect_if_not_ajax'])
            return self._add_response_headers(response)
        elif should_load_partial_page(self.request):
            # Load just the component(s) that we need to
            # (primary component of the request + dependent compontents)
            if isinstance(handler_result, HttpResponseRedirect):
                response_dict = {'redirect': handler_result["Location"]}
            else:
                actions = [self.component.get_response_action_tuple(self.request)]
                actions += [c.get_response_action_tuple(self.request)
                            for c
                            in self.component.dependent_components]
                response_dict = {
                    'actions': dict(actions),
                }
            response = json_response(response_dict)
            if "redirect" in response_dict:
                setattr(response, "form_submit_redirect", True)
            return self._add_response_headers(response)
        else:
            # full page request
            if isinstance(handler_result, HttpResponseRedirect):
                return self._add_response_headers(handler_result)
            elif handler_result is True:
                # POST request succeded, so auto-redirect for full page request
                get_string = ''
                if ('message_type' in self.component.response_message
                        or 'message_text' in self.component.response_message):
                    # Stash the success message in the session under a random
                    # key; the redirect target picks it up via submit_success.
                    data_session_key = random_session_key(self.request.session,
                                                          prefix="success_data:")
                    self.request.session['success_data:' + data_session_key] = {
                        'component_key': self.component.component_key,
                        'message_type': self.component.response_message['message_type'],
                        'message_text': self.component.response_message['message_text'],
                    }
                    get_string = "?submit_success=" + data_session_key
                return self._add_response_headers(HttpResponseRedirect(
                    fuzzy_reverse(self.page_key, kwargs=component_kwargs) + get_string))
            else:
                # Show full page when not directed otherwise.
                page = get_page(self.request,
                                self.obj_cache,
                                component=self.component,
                                response_message=self.response_message)
                page_render = render(self.request,
                                     page.template_name,
                                     page._get_context(self.request))
                page.handle_component_key_errors()
                return self._add_response_headers(page_render)
    def _get_guard_fail_response(self, request, kwargs, guard_fail):
        # Guard failures: ajax clients get a JSON message/redirect payload;
        # everyone else gets a flash message plus an HTTP redirect.
        if request.is_ajax():
            return self._add_response_headers(json_response(
                {
                    "messages": ([{'message_type': 'error',
                                   'message_text': guard_fail['str_error']}]
                                 if ((not guard_fail['always_redirect'])
                                     and ('str_error' in guard_fail))
                                 else None),
                    "redirect": (guard_fail['redirect_url']
                                 if guard_fail['always_redirect']
                                 else None)
                }
            ))
        else:
            if 'str_error' in guard_fail:
                messages.error(request, guard_fail['str_error'])
            return self._add_response_headers(HttpResponseRedirect(guard_fail['redirect_url']))
    def _add_response_headers(self, response):
        """
        Add any custom headers to the response object
        """
        if not hasattr(self, "component") or not self.component:
            return response
        all_components = self.component.dependent_components + [self.component]
        for component in all_components:
            for key, value in component.extra_response_headers.items():
                # Python 2: header names/values must be bytes, so coerce
                # unicode to utf-8 before assignment.
                if isinstance(key, unicode):
                    key = key.encode("utf-8")
                if isinstance(value, unicode):
                    value = value.encode("utf-8")
                response[key] = value
        return response
def execute_request(request,
                    url_name,
                    args=None,
                    init_obj_cache=None,
                    kwargs=None,
                    full_passthrough=False):
    """
    Loads a component and returns its render as the response to the
    current request.
    Note that this forces the request to GET and eliminates
    parameters. That means it breaks any view for loading
    via ajax.
    """
    if init_obj_cache is None:
        init_obj_cache = {}
    if args is None:
        args = []
    ComponentClass = COMPONENT_KEYS['to_component_class'][url_name]
    view = ComponentView.as_view(
        ComponentClass=ComponentClass, component_key=url_name,
        init_obj_cache=init_obj_cache)
    # Rewrite the request path so the target component sees its own URL.
    if kwargs:
        request.path = reverse(url_name, kwargs=kwargs)
    else:
        request.path = reverse(url_name, args=args)
    if not full_passthrough:
        # Force an idempotent GET and drop the POST body entirely.
        request.method = "GET"
        request.POST = QueryDict({})
        # Only a small whitelist of GET params survives the rewrite.
        ALLOWED_GET_KEYS = ('v', 'format', 'pretty')
        passthrough_dict = dict((k, v) for k, v in request.GET.iteritems() if k in ALLOWED_GET_KEYS)
        request.GET = QueryDict('').copy()
        request.GET.update(passthrough_dict)
        # NOTE(review): presumably resets the cached merged GET/POST dict
        # backing old Django's request.REQUEST -- confirm.
        request._request = {}
    # Flag for downstream code to detect an internally re-executed request.
    request.is_execute_request = True
    if kwargs:
        return view(request, **kwargs)
    else:
        return view(request, *args)
def json_response(obj, status=200):
    """Serialize ``obj`` to JSON and wrap it in an ``HttpResponse``."""
    payload = json.dumps(obj)
    return HttpResponse(payload, mimetype='application/json', status=status)
|
|
"""Abstract tensor product."""
from sympy import Expr, Add, Mul, Matrix, Pow, sympify
from sympy.printing.pretty.stringpict import prettyForm
from sympy.physics.quantum.qexpr import QuantumError
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.commutator import Commutator
from sympy.physics.quantum.anticommutator import AntiCommutator
from sympy.physics.quantum.matrixutils import (
numpy_ndarray,
scipy_sparse_matrix,
matrix_tensor_product
)
from sympy.core.trace import Tr
__all__ = [
'TensorProduct',
'tensor_product_simp'
]
#-----------------------------------------------------------------------------
# Tensor product
#-----------------------------------------------------------------------------
class TensorProduct(Expr):
    """The tensor product of two or more arguments.
    For matrices, this uses ``matrix_tensor_product`` to compute the Kronecker
    or tensor product matrix. For other objects a symbolic ``TensorProduct``
    instance is returned. The tensor product is a non-commutative
    multiplication that is used primarily with operators and states in quantum
    mechanics.
    Currently, the tensor product distinguishes between commutative and non-
    commutative arguments. Commutative arguments are assumed to be scalars and
    are pulled out in front of the ``TensorProduct``. Non-commutative arguments
    remain in the resulting ``TensorProduct``.
    Parameters
    ==========
    args : tuple
        A sequence of the objects to take the tensor product of.
    Examples
    ========
    Start with a simple tensor product of sympy matrices::
        >>> from sympy import I, Matrix, symbols
        >>> from sympy.physics.quantum import TensorProduct
        >>> m1 = Matrix([[1,2],[3,4]])
        >>> m2 = Matrix([[1,0],[0,1]])
        >>> TensorProduct(m1, m2)
        [1, 0, 2, 0]
        [0, 1, 0, 2]
        [3, 0, 4, 0]
        [0, 3, 0, 4]
        >>> TensorProduct(m2, m1)
        [1, 2, 0, 0]
        [3, 4, 0, 0]
        [0, 0, 1, 2]
        [0, 0, 3, 4]
    We can also construct tensor products of non-commutative symbols:
        >>> from sympy import Symbol
        >>> A = Symbol('A',commutative=False)
        >>> B = Symbol('B',commutative=False)
        >>> tp = TensorProduct(A, B)
        >>> tp
        AxB
    We can take the dagger of a tensor product (note the order does NOT reverse
    like the dagger of a normal product):
        >>> from sympy.physics.quantum import Dagger
        >>> Dagger(tp)
        Dagger(A)xDagger(B)
    Expand can be used to distribute a tensor product across addition:
        >>> C = Symbol('C',commutative=False)
        >>> tp = TensorProduct(A+B,C)
        >>> tp
        (A + B)xC
        >>> tp.expand(tensorproduct=True)
        AxC + BxC
    """
    is_commutative = False
    def __new__(cls, *args):
        # Matrix-like arguments are computed eagerly as a Kronecker product.
        if isinstance(args[0], (Matrix, numpy_ndarray, scipy_sparse_matrix)):
            return matrix_tensor_product(*args)
        # Pull commutative (scalar) factors out in front of the product.
        c_part, new_args = cls.flatten(sympify(args))
        c_part = Mul(*c_part)
        if len(new_args) == 0:
            return c_part
        elif len(new_args) == 1:
            return c_part*new_args[0]
        else:
            tp = Expr.__new__(cls, *new_args)
            return c_part*tp
    @classmethod
    def flatten(cls, args):
        """Split each argument into commutative and non-commutative parts.

        Returns ``(c_part, nc_parts)`` where ``c_part`` is a flat list of
        scalar factors and ``nc_parts`` holds one non-commutative factor
        per tensor slot.
        """
        # TODO: disallow nested TensorProducts.
        c_part = []
        nc_parts = []
        for arg in args:
            cp, ncp = arg.args_cnc()
            c_part.extend(list(cp))
            nc_parts.append(Mul._from_args(ncp))
        return c_part, nc_parts
    def _eval_adjoint(self):
        # Dagger distributes slot-wise; the order does NOT reverse.
        return TensorProduct(*[Dagger(i) for i in self.args])
    def _eval_rewrite(self, pattern, rule, **hints):
        # Rewrite each factor, then re-expand across any additions produced.
        terms = [t._eval_rewrite(pattern, rule, **hints) for t in self.args]
        return TensorProduct(*terms).expand(tensorproduct=True)
    def _sympystr(self, printer, *args):
        """Plain-string form: factors joined by 'x', compounds parenthesized."""
        from sympy.printing.str import sstr
        length = len(self.args)
        s = ''
        for i in range(length):
            if isinstance(self.args[i], (Add, Pow, Mul)):
                s = s + '('
            s = s + sstr(self.args[i])
            if isinstance(self.args[i], (Add, Pow, Mul)):
                s = s + ')'
            if i != length-1:
                s = s + 'x'
        return s
    def _pretty(self, printer, *args):
        """Pretty form: factors joined by U+2A02 (or 'x' without unicode)."""
        length = len(self.args)
        pform = printer._print('', *args)
        for i in range(length):
            next_pform = printer._print(self.args[i], *args)
            if isinstance(self.args[i], (Add, Mul)):
                next_pform = prettyForm(
                    *next_pform.parens(left='(', right=')')
                )
            pform = prettyForm(*pform.right(next_pform))
            if i != length-1:
                if printer._use_unicode:
                    pform = prettyForm(*pform.right(u'\u2a02' + u' '))
                else:
                    pform = prettyForm(*pform.right('x' + ' '))
        return pform
    def _latex(self, printer, *args):
        """LaTeX form: factors joined by \\otimes, compounds parenthesized."""
        length = len(self.args)
        s = ''
        for i in range(length):
            if isinstance(self.args[i], (Add, Mul)):
                s = s + '\\left('
            # The extra {} brackets are needed to get matplotlib's latex
            # rendered to render this properly.
            s = s + '{' + printer._print(self.args[i], *args) + '}'
            if isinstance(self.args[i], (Add, Mul)):
                s = s + '\\right)'
            if i != length-1:
                s = s + '\\otimes '
        return s
    def doit(self, **hints):
        # Evaluate each factor; __new__ re-normalizes the result.
        return TensorProduct(*[item.doit(**hints) for item in self.args])
    def _eval_expand_tensorproduct(self, **hints):
        """Distribute TensorProducts across addition."""
        args = self.args
        add_args = []
        # Expand only the first Add factor found; the recursive call on each
        # resulting term takes care of any remaining Add factors.
        for i in range(len(args)):
            if isinstance(args[i], Add):
                for aa in args[i].args:
                    tp = TensorProduct(*args[:i] + (aa,) + args[i + 1:])
                    if isinstance(tp, TensorProduct):
                        tp = tp._eval_expand_tensorproduct()
                    add_args.append(tp)
                break
        if add_args:
            return Add(*add_args)
        else:
            return self
    def _eval_trace(self, **kwargs):
        """Trace of the product; ``indices`` selects which slots are traced."""
        indices = kwargs.get('indices', None)
        exp = tensor_product_simp(self)
        if indices is None or len(indices) == 0:
            return Mul(*[Tr(arg).doit() for arg in exp.args])
        else:
            return Mul(*[Tr(value).doit() if idx in indices else value
                         for idx, value in enumerate(exp.args)])
def tensor_product_simp_Mul(e):
    """Simplify a Mul with TensorProducts.
    Currently the main use of this is to simplify a ``Mul`` of ``TensorProduct``s
    to a ``TensorProduct`` of ``Muls``. It currently only works for relatively
    simple cases where the initial ``Mul`` only has scalars and raw
    ``TensorProduct``s, not ``Add``, ``Pow``, ``Commutator``s of
    ``TensorProduct``s.
    Parameters
    ==========
    e : Expr
        A ``Mul`` of ``TensorProduct``s to be simplified.
    Returns
    =======
    e : Expr
        A ``TensorProduct`` of ``Mul``s.
    Examples
    ========
    This is an example of the type of simplification that this function
    performs::
        >>> from sympy.physics.quantum.tensorproduct import tensor_product_simp_Mul, TensorProduct
        >>> from sympy import Symbol
        >>> A = Symbol('A',commutative=False)
        >>> B = Symbol('B',commutative=False)
        >>> C = Symbol('C',commutative=False)
        >>> D = Symbol('D',commutative=False)
        >>> e = TensorProduct(A,B)*TensorProduct(C,D)
        >>> e
        AxB*CxD
        >>> tensor_product_simp_Mul(e)
        (A*C)x(B*D)
    """
    # TODO: This won't work with Muls that have other composites of
    # TensorProducts, like an Add, Pow, Commutator, etc.
    # TODO: This only works for the equivalent of single Qbit gates.
    if not isinstance(e, Mul):
        return e
    c_part, nc_part = e.args_cnc()
    n_nc = len(nc_part)
    if n_nc == 0 or n_nc == 1:
        # Nothing non-commutative to combine.
        return e
    elif e.has(TensorProduct):
        current = nc_part[0]
        if not isinstance(current, TensorProduct):
            raise TypeError('TensorProduct expected, got: %r' % current)
        n_terms = len(current.args)
        new_args = list(current.args)
        # NOTE: loop variable renamed from ``next`` -- it shadowed the builtin.
        for term in nc_part[1:]:
            # TODO: check the hilbert spaces of term and current here.
            if isinstance(term, TensorProduct):
                if n_terms != len(term.args):
                    raise QuantumError(
                        'TensorProducts of different lengths: %r and %r' %
                        (current, term)
                    )
                # Multiply slot-wise into the accumulated TensorProduct.
                for i in range(len(new_args)):
                    new_args[i] = new_args[i]*term.args[i]
            else:
                # this won't quite work as we don't want the factor repeated
                # in every slot of the TensorProduct
                for i in range(len(new_args)):
                    new_args[i] = new_args[i]*term
            current = term
        return Mul(*c_part)*TensorProduct(*new_args)
    else:
        return e
def tensor_product_simp(e, **hints):
    """Try to simplify and combine TensorProducts.
    In general this will try to pull expressions inside of ``TensorProducts``.
    It currently only works for relatively simple cases where the products have
    only scalars, raw ``TensorProducts``, not ``Add``, ``Pow``, ``Commutators``
    of ``TensorProducts``. It is best to see what it does by showing examples.
    Examples
    ========
    >>> from sympy.physics.quantum import tensor_product_simp
    >>> from sympy.physics.quantum import TensorProduct
    >>> from sympy import Symbol
    >>> A = Symbol('A',commutative=False)
    >>> B = Symbol('B',commutative=False)
    >>> C = Symbol('C',commutative=False)
    >>> D = Symbol('D',commutative=False)
    First see what happens to products of tensor products:
    >>> e = TensorProduct(A,B)*TensorProduct(C,D)
    >>> e
    AxB*CxD
    >>> tensor_product_simp(e)
    (A*C)x(B*D)
    This is the core logic of this function, and it works inside, powers, sums,
    commutators and anticommutators as well:
    >>> tensor_product_simp(e**2)
    (A*C)x(B*D)**2
    """
    # Recurse into each composite node; Mul is the case that actually
    # combines TensorProducts (via tensor_product_simp_Mul).
    if isinstance(e, Add):
        return Add(*(tensor_product_simp(arg) for arg in e.args))
    if isinstance(e, Pow):
        return tensor_product_simp(e.base) ** e.exp
    if isinstance(e, Mul):
        return tensor_product_simp_Mul(e)
    if isinstance(e, Commutator):
        return Commutator(*(tensor_product_simp(arg) for arg in e.args))
    if isinstance(e, AntiCommutator):
        return AntiCommutator(*(tensor_product_simp(arg) for arg in e.args))
    # Leaves (symbols, numbers, bare TensorProducts) pass through unchanged.
    return e
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import uuid
import mock
from oslo.utils import timeutils
from testtools import matchers
from keystone import assignment
from keystone import auth
from keystone.common import authorization
from keystone.common import environment
from keystone import config
from keystone import exception
from keystone.models import token_model
from keystone import tests
from keystone.tests import default_fixtures
from keystone.tests.ksfixtures import database
from keystone import token
from keystone.token import provider
from keystone import trust
CONF = config.CONF
# ISO 8601 timestamp layout (with microseconds, 'Z' suffix) used by v2 tokens.
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
# NOTE(review): presumably the service endpoint used by tests further down;
# it is not referenced in this portion of the file.
HOST_URL = 'http://keystone:5001'
def _build_user_auth(token=None, user_id=None, username=None,
password=None, tenant_id=None, tenant_name=None,
trust_id=None):
"""Build auth dictionary.
It will create an auth dictionary based on all the arguments
that it receives.
"""
auth_json = {}
if token is not None:
auth_json['token'] = token
if username or password:
auth_json['passwordCredentials'] = {}
if username is not None:
auth_json['passwordCredentials']['username'] = username
if user_id is not None:
auth_json['passwordCredentials']['userId'] = user_id
if password is not None:
auth_json['passwordCredentials']['password'] = password
if tenant_name is not None:
auth_json['tenantName'] = tenant_name
if tenant_id is not None:
auth_json['tenantId'] = tenant_id
if trust_id is not None:
auth_json['trust_id'] = trust_id
return auth_json
class AuthTest(tests.TestCase):
    """Common fixture/base class for the v2 token controller tests."""
    def setUp(self):
        self.useFixture(database.Database())
        super(AuthTest, self).setUp()
        self.load_backends()
        self.load_fixtures(default_fixtures)
        # Contexts reused by subclasses: one with external (REMOTE_USER)
        # auth present, one with an empty environment.
        self.context_with_remote_user = {'environment':
                                         {'REMOTE_USER': 'FOO',
                                          'AUTH_TYPE': 'Negotiate'}}
        self.empty_context = {'environment': {}}
        self.controller = token.controllers.Auth()
        # This call sets up, among other things, the call to popen
        # that will be used to run the CMS command. These tests were
        # passing only due to the global nature of the call. If the
        # tests in this file are run alone, API calls return unauthorized.
        environment.use_eventlet(monkeypatch_thread=False)
    def assertEqualTokens(self, a, b, enforce_audit_ids=True):
        """Assert that two tokens are equal.
        Compare two tokens except for their ids. This also truncates
        the time in the comparison.
        """
        # NOTE: normalize mutates its argument in place (id is dummied,
        # expires/issued_at/audit_ids removed) before the dict comparison.
        def normalize(token):
            token['access']['token']['id'] = 'dummy'
            del token['access']['token']['expires']
            del token['access']['token']['issued_at']
            del token['access']['token']['audit_ids']
            return token
        self.assertCloseEnoughForGovernmentWork(
            timeutils.parse_isotime(a['access']['token']['expires']),
            timeutils.parse_isotime(b['access']['token']['expires']))
        self.assertCloseEnoughForGovernmentWork(
            timeutils.parse_isotime(a['access']['token']['issued_at']),
            timeutils.parse_isotime(b['access']['token']['issued_at']))
        if enforce_audit_ids:
            self.assertIn(a['access']['token']['audit_ids'][0],
                          b['access']['token']['audit_ids'])
            self.assertThat(len(a['access']['token']['audit_ids']),
                            matchers.LessThan(3))
            self.assertThat(len(b['access']['token']['audit_ids']),
                            matchers.LessThan(3))
        return self.assertDictEqual(normalize(a), normalize(b))
class AuthBadRequests(AuthTest):
    """Malformed and oversized v2 auth requests must raise validation errors."""
    def test_no_external_auth(self):
        """Verify that _authenticate_external() raises exception if N/A."""
        self.assertRaises(
            token.controllers.ExternalAuthNotApplicable,
            self.controller._authenticate_external,
            context={}, auth={})
    def test_empty_remote_user(self):
        """Verify that _authenticate_external() raises exception if
        REMOTE_USER is set as the empty string.
        """
        context = {'environment': {'REMOTE_USER': ''}}
        self.assertRaises(
            token.controllers.ExternalAuthNotApplicable,
            self.controller._authenticate_external,
            context=context, auth={})
    def test_no_token_in_auth(self):
        """Verify that _authenticate_token() raises exception if no token."""
        self.assertRaises(
            exception.ValidationError,
            self.controller._authenticate_token,
            None, {})
    def test_no_credentials_in_auth(self):
        """Verify that _authenticate_local() raises exception if no creds."""
        self.assertRaises(
            exception.ValidationError,
            self.controller._authenticate_local,
            None, {})
    def test_authenticate_blank_request_body(self):
        """Verify sending empty json dict raises the right exception."""
        self.assertRaises(exception.ValidationError,
                          self.controller.authenticate,
                          {}, {})
    def test_authenticate_blank_auth(self):
        """Verify sending blank 'auth' raises the right exception."""
        body_dict = _build_user_auth()
        self.assertRaises(exception.ValidationError,
                          self.controller.authenticate,
                          {}, body_dict)
    def test_authenticate_invalid_auth_content(self):
        """Verify sending invalid 'auth' raises the right exception."""
        self.assertRaises(exception.ValidationError,
                          self.controller.authenticate,
                          {}, {'auth': 'abcd'})
    def test_authenticate_user_id_too_large(self):
        """Verify sending large 'userId' raises the right exception."""
        body_dict = _build_user_auth(user_id='0' * 65, username='FOO',
                                     password='foo2')
        self.assertRaises(exception.ValidationSizeError,
                          self.controller.authenticate,
                          {}, body_dict)
    def test_authenticate_username_too_large(self):
        """Verify sending large 'username' raises the right exception."""
        body_dict = _build_user_auth(username='0' * 65, password='foo2')
        self.assertRaises(exception.ValidationSizeError,
                          self.controller.authenticate,
                          {}, body_dict)
    def test_authenticate_tenant_id_too_large(self):
        """Verify sending large 'tenantId' raises the right exception."""
        body_dict = _build_user_auth(username='FOO', password='foo2',
                                     tenant_id='0' * 65)
        self.assertRaises(exception.ValidationSizeError,
                          self.controller.authenticate,
                          {}, body_dict)
    def test_authenticate_tenant_name_too_large(self):
        """Verify sending large 'tenantName' raises the right exception."""
        body_dict = _build_user_auth(username='FOO', password='foo2',
                                     tenant_name='0' * 65)
        self.assertRaises(exception.ValidationSizeError,
                          self.controller.authenticate,
                          {}, body_dict)
    def test_authenticate_token_too_large(self):
        """Verify sending large 'token' raises the right exception."""
        body_dict = _build_user_auth(token={'id': '0' * 8193})
        self.assertRaises(exception.ValidationSizeError,
                          self.controller.authenticate,
                          {}, body_dict)
    def test_authenticate_password_too_large(self):
        """Verify sending large 'password' raises the right exception."""
        # Password limit is configuration-driven, unlike the fixed limits above.
        length = CONF.identity.max_password_length + 1
        body_dict = _build_user_auth(username='FOO', password='0' * length)
        self.assertRaises(exception.ValidationSizeError,
                          self.controller.authenticate,
                          {}, body_dict)
class AuthWithToken(AuthTest):
    def test_unscoped_token(self):
        """Verify getting an unscoped token with password creds."""
        body_dict = _build_user_auth(username='FOO',
                                     password='foo2')
        unscoped_token = self.controller.authenticate({}, body_dict)
        # No tenant was requested, so the token must not be tenant-scoped.
        self.assertNotIn('tenant', unscoped_token['access']['token'])
    def test_auth_invalid_token(self):
        """Verify exception is raised if invalid token."""
        # A random, never-issued token id must be rejected as Unauthorized.
        body_dict = _build_user_auth(token={"id": uuid.uuid4().hex})
        self.assertRaises(
            exception.Unauthorized,
            self.controller.authenticate,
            {}, body_dict)
    def test_auth_bad_formatted_token(self):
        """Verify exception is raised if invalid token."""
        # An empty token dict is malformed input: ValidationError,
        # not Unauthorized.
        body_dict = _build_user_auth(token={})
        self.assertRaises(
            exception.ValidationError,
            self.controller.authenticate,
            {}, body_dict)
    def test_auth_unscoped_token_no_project(self):
        """Verify getting an unscoped token with an unscoped token."""
        body_dict = _build_user_auth(
            username='FOO',
            password='foo2')
        unscoped_token = self.controller.authenticate({}, body_dict)
        # Re-authenticate with the first token; the result should be an
        # equivalent unscoped token (ids/timestamps aside).
        body_dict = _build_user_auth(
            token=unscoped_token["access"]["token"])
        unscoped_token_2 = self.controller.authenticate({}, body_dict)
        self.assertEqualTokens(unscoped_token, unscoped_token_2)
    def test_auth_unscoped_token_project(self):
        """Verify getting a token in a tenant with an unscoped token."""
        # Add a role in so we can check we get this back
        self.assignment_api.add_role_to_user_and_project(
            self.user_foo['id'],
            self.tenant_bar['id'],
            self.role_member['id'])
        # Get an unscoped token
        body_dict = _build_user_auth(
            username='FOO',
            password='foo2')
        unscoped_token = self.controller.authenticate({}, body_dict)
        # Get a token on BAR tenant using the unscoped token
        body_dict = _build_user_auth(
            token=unscoped_token["access"]["token"],
            tenant_name="BAR")
        scoped_token = self.controller.authenticate({}, body_dict)
        tenant = scoped_token["access"]["token"]["tenant"]
        roles = scoped_token["access"]["metadata"]["roles"]
        self.assertEqual(self.tenant_bar['id'], tenant["id"])
        self.assertEqual(self.role_member['id'], roles[0])
    def test_auth_token_project_group_role(self):
        """Verify getting a token in a tenant with group roles."""
        # Add a v2 style role in so we can check we get this back
        self.assignment_api.add_role_to_user_and_project(
            self.user_foo['id'],
            self.tenant_bar['id'],
            self.role_member['id'])
        # Now create a group role for this user as well
        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
        self.assignment_api.create_domain(domain1['id'], domain1)
        new_group = {'domain_id': domain1['id'], 'name': uuid.uuid4().hex}
        new_group = self.identity_api.create_group(new_group)
        self.identity_api.add_user_to_group(self.user_foo['id'],
                                            new_group['id'])
        self.assignment_api.create_grant(
            group_id=new_group['id'],
            project_id=self.tenant_bar['id'],
            role_id=self.role_admin['id'])
        # Get a scoped token for the tenant
        body_dict = _build_user_auth(
            username='FOO',
            password='foo2',
            tenant_name="BAR")
        scoped_token = self.controller.authenticate({}, body_dict)
        tenant = scoped_token["access"]["token"]["tenant"]
        roles = scoped_token["access"]["metadata"]["roles"]
        self.assertEqual(self.tenant_bar['id'], tenant["id"])
        # The token must carry both the direct role and the group-derived one.
        self.assertIn(self.role_member['id'], roles)
        self.assertIn(self.role_admin['id'], roles)
    def test_belongs_to_no_tenant(self):
        """An unscoped token must fail validation with a belongsTo filter."""
        r = self.controller.authenticate(
            {},
            auth={
                'passwordCredentials': {
                    'username': self.user_foo['name'],
                    'password': self.user_foo['password']
                }
            })
        unscoped_token_id = r['access']['token']['id']
        self.assertRaises(
            exception.Unauthorized,
            self.controller.validate_token,
            dict(is_admin=True, query_string={'belongsTo': 'BAR'}),
            token_id=unscoped_token_id)
    def test_belongs_to(self):
        """Scoped-token validation fails for non-matching belongsTo values."""
        body_dict = _build_user_auth(
            username='FOO',
            password='foo2',
            tenant_name="BAR")
        scoped_token = self.controller.authenticate({}, body_dict)
        scoped_token_id = scoped_token['access']['token']['id']
        # Both 'me' and the tenant *name* 'BAR' are rejected -- belongsTo
        # presumably matches on tenant id only; confirm in validate_token.
        self.assertRaises(
            exception.Unauthorized,
            self.controller.validate_token,
            dict(is_admin=True, query_string={'belongsTo': 'me'}),
            token_id=scoped_token_id)
        self.assertRaises(
            exception.Unauthorized,
            self.controller.validate_token,
            dict(is_admin=True, query_string={'belongsTo': 'BAR'}),
            token_id=scoped_token_id)
    def test_token_auth_with_binding(self):
        """Kerberos bind info is required and carried across rescoping."""
        self.config_fixture.config(group='token', bind=['kerberos'])
        body_dict = _build_user_auth()
        unscoped_token = self.controller.authenticate(
            self.context_with_remote_user, body_dict)
        # the token should have bind information in it
        bind = unscoped_token['access']['token']['bind']
        self.assertEqual('FOO', bind['kerberos'])
        body_dict = _build_user_auth(
            token=unscoped_token['access']['token'],
            tenant_name='BAR')
        # using unscoped token without remote user context fails
        self.assertRaises(
            exception.Unauthorized,
            self.controller.authenticate,
            self.empty_context, body_dict)
        # using token with remote user context succeeds
        scoped_token = self.controller.authenticate(
            self.context_with_remote_user, body_dict)
        # the bind information should be carried over from the original token
        bind = scoped_token['access']['token']['bind']
        self.assertEqual('FOO', bind['kerberos'])
    def test_deleting_role_revokes_token(self):
        """Deleting a role that a scoped token depends on invalidates it."""
        role_controller = assignment.controllers.Role()
        project1 = {'id': 'Project1', 'name': uuid.uuid4().hex,
                    'domain_id': DEFAULT_DOMAIN_ID}
        self.assignment_api.create_project(project1['id'], project1)
        role_one = {'id': 'role_one', 'name': uuid.uuid4().hex}
        self.assignment_api.create_role(role_one['id'], role_one)
        self.assignment_api.add_role_to_user_and_project(
            self.user_foo['id'], project1['id'], role_one['id'])
        no_context = {}
        # Get a scoped token for the tenant
        body_dict = _build_user_auth(
            username=self.user_foo['name'],
            password=self.user_foo['password'],
            tenant_name=project1['name'])
        token = self.controller.authenticate(no_context, body_dict)
        # Ensure it is valid
        token_id = token['access']['token']['id']
        self.controller.validate_token(
            dict(is_admin=True, query_string={}),
            token_id=token_id)
        # Delete the role, which should invalidate the token
        role_controller.delete_role(
            dict(is_admin=True, query_string={}), role_one['id'])
        # Check the token is now invalid
        self.assertRaises(
            exception.TokenNotFound,
            self.controller.validate_token,
            dict(is_admin=True, query_string={}),
            token_id=token_id)
    def test_only_original_audit_id_is_kept(self):
        """Audit chains keep only the first token's audit id as the parent."""
        context = {}
        def get_audit_ids(token):
            return token['access']['token']['audit_ids']
        # get a token
        body_dict = _build_user_auth(username='FOO', password='foo2')
        unscoped_token = self.controller.authenticate(context, body_dict)
        starting_audit_id = get_audit_ids(unscoped_token)[0]
        self.assertIsNotNone(starting_audit_id)
        # get another token to ensure the correct parent audit_id is set
        body_dict = _build_user_auth(token=unscoped_token["access"]["token"])
        unscoped_token_2 = self.controller.authenticate(context, body_dict)
        audit_ids = get_audit_ids(unscoped_token_2)
        self.assertThat(audit_ids, matchers.HasLength(2))
        self.assertThat(audit_ids[-1], matchers.Equals(starting_audit_id))
        # get another token from token 2 and ensure the correct parent
        # audit_id is set
        body_dict = _build_user_auth(token=unscoped_token_2["access"]["token"])
        unscoped_token_3 = self.controller.authenticate(context, body_dict)
        audit_ids = get_audit_ids(unscoped_token_3)
        # Still length 2: the chain keeps the ORIGINAL id, not token 2's.
        self.assertThat(audit_ids, matchers.HasLength(2))
        self.assertThat(audit_ids[-1], matchers.Equals(starting_audit_id))
def test_revoke_by_audit_chain_id_original_token(self):
self.config_fixture.config(group='token', revoke_by_id=False)
context = {}
# get a token
body_dict = _build_user_auth(username='FOO', password='foo2')
unscoped_token = self.controller.authenticate(context, body_dict)
token_id = unscoped_token['access']['token']['id']
# get a second token
body_dict = _build_user_auth(token=unscoped_token["access"]["token"])
unscoped_token_2 = self.controller.authenticate(context, body_dict)
token_2_id = unscoped_token_2['access']['token']['id']
self.token_provider_api.revoke_token(token_id, revoke_chain=True)
self.assertRaises(exception.TokenNotFound,
self.token_provider_api.validate_v2_token,
token_id=token_id)
self.assertRaises(exception.TokenNotFound,
self.token_provider_api.validate_v2_token,
token_id=token_2_id)
def test_revoke_by_audit_chain_id_chained_token(self):
self.config_fixture.config(group='token', revoke_by_id=False)
context = {}
# get a token
body_dict = _build_user_auth(username='FOO', password='foo2')
unscoped_token = self.controller.authenticate(context, body_dict)
token_id = unscoped_token['access']['token']['id']
# get a second token
body_dict = _build_user_auth(token=unscoped_token["access"]["token"])
unscoped_token_2 = self.controller.authenticate(context, body_dict)
token_2_id = unscoped_token_2['access']['token']['id']
self.token_provider_api.revoke_token(token_2_id, revoke_chain=True)
self.assertRaises(exception.TokenNotFound,
self.token_provider_api.validate_v2_token,
token_id=token_id)
self.assertRaises(exception.TokenNotFound,
self.token_provider_api.validate_v2_token,
token_id=token_2_id)
def _mock_audit_info(self, parent_audit_id):
# NOTE(morgainfainberg): The token model and other cases that are
# extracting the audit id expect 'None' if the audit id doesn't
# exist. This ensures that the audit_id is None and the
# audit_chain_id will also return None.
return [None, None]
    def test_revoke_with_no_audit_info(self):
        """Tokens without audit info still revoke parent and child together.

        With ``provider.audit_info`` mocked to return no ids, both a chained
        revoke and a plain revoke-by-token-id must invalidate the parent
        token and the token derived from it.
        """
        self.config_fixture.config(group='token', revoke_by_id=False)
        context = {}
        with mock.patch.object(provider, 'audit_info', self._mock_audit_info):
            # get a token
            body_dict = _build_user_auth(username='FOO', password='foo2')
            unscoped_token = self.controller.authenticate(context, body_dict)
            token_id = unscoped_token['access']['token']['id']
            # get a second token
            body_dict = _build_user_auth(
                token=unscoped_token['access']['token'])
            unscoped_token_2 = self.controller.authenticate(context, body_dict)
            token_2_id = unscoped_token_2['access']['token']['id']
            # Explicit chain revocation: exactly one revocation event is
            # recorded, carrying the LATER token's expiry.
            self.token_provider_api.revoke_token(token_id, revoke_chain=True)
            revoke_events = self.revoke_api.get_events()
            self.assertThat(revoke_events, matchers.HasLength(1))
            revoke_event = revoke_events[0].to_dict()
            self.assertIn('expires_at', revoke_event)
            self.assertEqual(unscoped_token_2['access']['token']['expires'],
                             revoke_event['expires_at'])
            self.assertRaises(exception.TokenNotFound,
                              self.token_provider_api.validate_v2_token,
                              token_id=token_id)
            self.assertRaises(exception.TokenNotFound,
                              self.token_provider_api.validate_v2_token,
                              token_id=token_2_id)
            # get a new token, with no audit info
            body_dict = _build_user_auth(username='FOO', password='foo2')
            unscoped_token = self.controller.authenticate(context, body_dict)
            token_id = unscoped_token['access']['token']['id']
            # get a second token
            body_dict = _build_user_auth(
                token=unscoped_token['access']['token'])
            unscoped_token_2 = self.controller.authenticate(context, body_dict)
            token_2_id = unscoped_token_2['access']['token']['id']
            # Revoke by audit_id, no audit_info means both parent and child
            # token are revoked.
            self.token_provider_api.revoke_token(token_id)
            revoke_events = self.revoke_api.get_events()
            self.assertThat(revoke_events, matchers.HasLength(2))
            revoke_event = revoke_events[1].to_dict()
            self.assertIn('expires_at', revoke_event)
            self.assertEqual(unscoped_token_2['access']['token']['expires'],
                             revoke_event['expires_at'])
            self.assertRaises(exception.TokenNotFound,
                              self.token_provider_api.validate_v2_token,
                              token_id=token_id)
            self.assertRaises(exception.TokenNotFound,
                              self.token_provider_api.validate_v2_token,
                              token_id=token_2_id)
class AuthWithPasswordCredentials(AuthTest):
    """v2 authentication via username/password credentials."""
    def test_auth_invalid_user(self):
        """Verify exception is raised if invalid user."""
        body_dict = _build_user_auth(
            username=uuid.uuid4().hex,
            password=uuid.uuid4().hex)
        self.assertRaises(
            exception.Unauthorized,
            self.controller.authenticate,
            {}, body_dict)
    def test_auth_valid_user_invalid_password(self):
        """Verify exception is raised if invalid password."""
        body_dict = _build_user_auth(
            username="FOO",
            password=uuid.uuid4().hex)
        self.assertRaises(
            exception.Unauthorized,
            self.controller.authenticate,
            {}, body_dict)
    def test_auth_empty_password(self):
        """Verify exception is raised if empty password."""
        body_dict = _build_user_auth(
            username="FOO",
            password="")
        self.assertRaises(
            exception.Unauthorized,
            self.controller.authenticate,
            {}, body_dict)
    def test_auth_no_password(self):
        """Verify exception is raised if no password is supplied at all."""
        body_dict = _build_user_auth(username="FOO")
        # Missing password is a malformed request (400), not a 401.
        self.assertRaises(
            exception.ValidationError,
            self.controller.authenticate,
            {}, body_dict)
    def test_authenticate_blank_password_credentials(self):
        """Sending empty dict as passwordCredentials raises a 400 error."""
        body_dict = {'passwordCredentials': {}, 'tenantName': 'demo'}
        self.assertRaises(exception.ValidationError,
                          self.controller.authenticate,
                          {}, body_dict)
    def test_authenticate_no_username(self):
        """Verify skipping username raises the right exception."""
        body_dict = _build_user_auth(password="pass",
                                     tenant_name="demo")
        self.assertRaises(exception.ValidationError,
                          self.controller.authenticate,
                          {}, body_dict)
    def test_bind_without_remote_user(self):
        """No bind info is added when kerberos bind is on but unused."""
        self.config_fixture.config(group='token', bind=['kerberos'])
        body_dict = _build_user_auth(username='FOO', password='foo2',
                                     tenant_name='BAR')
        token = self.controller.authenticate({}, body_dict)
        self.assertNotIn('bind', token['access']['token'])
    def test_change_default_domain_id(self):
        """Auth resolves the user against the configured default domain."""
        # If the default_domain_id config option is not the default then the
        # user in auth data is from the new default domain.
        # 1) Create a new domain.
        new_domain_id = uuid.uuid4().hex
        new_domain = {
            'description': uuid.uuid4().hex,
            'enabled': True,
            'id': new_domain_id,
            'name': uuid.uuid4().hex,
        }
        self.assignment_api.create_domain(new_domain_id, new_domain)
        # 2) Create user "foo" in new domain with different password than
        #    default-domain foo.
        new_user_password = uuid.uuid4().hex
        new_user = {
            'name': self.user_foo['name'],
            'domain_id': new_domain_id,
            'password': new_user_password,
            'email': 'foo@bar2.com',
        }
        new_user = self.identity_api.create_user(new_user)
        # 3) Update the default_domain_id config option to the new domain
        self.config_fixture.config(group='identity',
                                   default_domain_id=new_domain_id)
        # 4) Authenticate as "foo" using the password in the new domain.
        body_dict = _build_user_auth(
            username=self.user_foo['name'],
            password=new_user_password)
        # The test is successful if this doesn't raise, so no need to assert.
        self.controller.authenticate({}, body_dict)
class AuthWithRemoteUser(AuthTest):
    """External (REMOTE_USER) authentication via the v2 controller."""
    def test_unscoped_remote_authn(self):
        """Verify getting an unscoped token with external authn."""
        body_dict = _build_user_auth(
            username='FOO',
            password='foo2')
        local_token = self.controller.authenticate(
            {}, body_dict)
        body_dict = _build_user_auth()
        remote_token = self.controller.authenticate(
            self.context_with_remote_user, body_dict)
        # Audit ids necessarily differ between the two tokens.
        self.assertEqualTokens(local_token, remote_token,
                               enforce_audit_ids=False)
    def test_unscoped_remote_authn_jsonless(self):
        """Verify that external auth with invalid request fails."""
        self.assertRaises(
            exception.ValidationError,
            self.controller.authenticate,
            {'REMOTE_USER': 'FOO'},
            None)
    def test_scoped_remote_authn(self):
        """Verify getting a token with external authn."""
        body_dict = _build_user_auth(
            username='FOO',
            password='foo2',
            tenant_name='BAR')
        local_token = self.controller.authenticate(
            {}, body_dict)
        body_dict = _build_user_auth(
            tenant_name='BAR')
        remote_token = self.controller.authenticate(
            self.context_with_remote_user, body_dict)
        self.assertEqualTokens(local_token, remote_token,
                               enforce_audit_ids=False)
    def test_scoped_nometa_remote_authn(self):
        """Verify getting a token with external authn and no metadata."""
        body_dict = _build_user_auth(
            username='TWO',
            password='two2',
            tenant_name='BAZ')
        local_token = self.controller.authenticate(
            {}, body_dict)
        body_dict = _build_user_auth(tenant_name='BAZ')
        remote_token = self.controller.authenticate(
            {'environment': {'REMOTE_USER': 'TWO'}}, body_dict)
        self.assertEqualTokens(local_token, remote_token,
                               enforce_audit_ids=False)
    def test_scoped_remote_authn_invalid_user(self):
        """Verify that external auth with invalid user fails."""
        body_dict = _build_user_auth(tenant_name="BAR")
        self.assertRaises(
            exception.Unauthorized,
            self.controller.authenticate,
            {'environment': {'REMOTE_USER': uuid.uuid4().hex}},
            body_dict)
    def test_bind_with_kerberos(self):
        """Kerberos bind info is recorded in the token when enabled."""
        self.config_fixture.config(group='token', bind=['kerberos'])
        body_dict = _build_user_auth(tenant_name="BAR")
        token = self.controller.authenticate(self.context_with_remote_user,
                                             body_dict)
        self.assertEqual('FOO', token['access']['token']['bind']['kerberos'])
    def test_bind_without_config_opt(self):
        """No kerberos bind info when only other bind types are enabled."""
        self.config_fixture.config(group='token', bind=['x509'])
        body_dict = _build_user_auth(tenant_name='BAR')
        token = self.controller.authenticate(self.context_with_remote_user,
                                             body_dict)
        self.assertNotIn('bind', token['access']['token'])
class AuthWithTrust(AuthTest):
    """Trust-based authentication flows (OS-TRUST) via v2 and v3.

    user_foo acts as the trustor, user_two as the trustee; member and
    browser roles on tenant_bar are delegated through the trust.
    """
    def setUp(self):
        super(AuthWithTrust, self).setUp()
        self.trust_controller = trust.controllers.TrustV3()
        self.auth_v3_controller = auth.controllers.Auth()
        self.trustor = self.user_foo
        self.trustee = self.user_two
        self.assigned_roles = [self.role_member['id'],
                               self.role_browser['id']]
        for assigned_role in self.assigned_roles:
            self.assignment_api.add_role_to_user_and_project(
                self.trustor['id'], self.tenant_bar['id'], assigned_role)
        # Baseline trust payload; individual tests mutate copies (or the
        # dict itself — setUp rebuilds it per test).
        self.sample_data = {'trustor_user_id': self.trustor['id'],
                            'trustee_user_id': self.trustee['id'],
                            'project_id': self.tenant_bar['id'],
                            'impersonation': True,
                            'roles': [{'id': self.role_browser['id']},
                                      {'name': self.role_member['name']}]}
    def config_overrides(self):
        """Enable the trust extension for every test in this class."""
        super(AuthWithTrust, self).config_overrides()
        self.config_fixture.config(group='trust', enabled=True)
    def _create_auth_context(self, token_id):
        """Build a request context carrying the given token's auth info."""
        token_ref = token_model.KeystoneToken(
            token_id=token_id,
            token_data=self.token_provider_api.validate_token(token_id))
        auth_context = authorization.token_to_auth_context(token_ref)
        return {'environment': {authorization.AUTH_CONTEXT_ENV: auth_context},
                'token_id': token_id,
                'host_url': HOST_URL}
    def create_trust(self, trust_data, trustor_name, expires_at=None,
                     impersonation=True):
        """Create a trust as *trustor_name* and return the trust dict."""
        username = trustor_name
        password = 'foo2'
        unscoped_token = self.get_unscoped_token(username, password)
        context = self._create_auth_context(
            unscoped_token['access']['token']['id'])
        # Deep-copy so the caller's template is never mutated.
        trust_data_copy = copy.deepcopy(trust_data)
        trust_data_copy['expires_at'] = expires_at
        trust_data_copy['impersonation'] = impersonation
        return self.trust_controller.create_trust(
            context, trust=trust_data_copy)['trust']
    def get_unscoped_token(self, username, password='foo2'):
        """Authenticate by username/password; return the v2 auth response."""
        body_dict = _build_user_auth(username=username, password=password)
        return self.controller.authenticate({}, body_dict)
    def build_v2_token_request(self, username, password, trust,
                               tenant_id=None):
        """Build a v2 auth body that consumes *trust* via a fresh token."""
        if not tenant_id:
            tenant_id = self.tenant_bar['id']
        unscoped_token = self.get_unscoped_token(username, password)
        unscoped_token_id = unscoped_token['access']['token']['id']
        request_body = _build_user_auth(token={'id': unscoped_token_id},
                                        trust_id=trust['id'],
                                        tenant_id=tenant_id)
        return request_body
    def test_create_trust_bad_data_fails(self):
        """Trust creation without a trustee is rejected as invalid."""
        unscoped_token = self.get_unscoped_token(self.trustor['name'])
        context = self._create_auth_context(
            unscoped_token['access']['token']['id'])
        # 'trustee_user_id' is deliberately missing.
        bad_sample_data = {'trustor_user_id': self.trustor['id'],
                           'project_id': self.tenant_bar['id'],
                           'roles': [{'id': self.role_browser['id']}]}
        self.assertRaises(exception.ValidationError,
                          self.trust_controller.create_trust,
                          context, trust=bad_sample_data)
    def test_create_trust_no_roles(self):
        """A project-scoped trust with an empty role list is forbidden."""
        unscoped_token = self.get_unscoped_token(self.trustor['name'])
        context = {'token_id': unscoped_token['access']['token']['id']}
        self.sample_data['roles'] = []
        self.assertRaises(exception.Forbidden,
                          self.trust_controller.create_trust,
                          context, trust=self.sample_data)
    def test_create_trust(self):
        """A valid trust records users, roles, expiry, and self links."""
        expires_at = timeutils.strtime(timeutils.utcnow() +
                                       datetime.timedelta(minutes=10),
                                       fmt=TIME_FORMAT)
        new_trust = self.create_trust(self.sample_data, self.trustor['name'],
                                      expires_at=expires_at)
        self.assertEqual(self.trustor['id'], new_trust['trustor_user_id'])
        self.assertEqual(self.trustee['id'], new_trust['trustee_user_id'])
        role_ids = [self.role_browser['id'], self.role_member['id']]
        self.assertTrue(timeutils.parse_strtime(new_trust['expires_at'],
                                                fmt=TIME_FORMAT))
        self.assertIn('%s/v3/OS-TRUST/' % HOST_URL,
                      new_trust['links']['self'])
        self.assertIn('%s/v3/OS-TRUST/' % HOST_URL,
                      new_trust['roles_links']['self'])
        for role in new_trust['roles']:
            self.assertIn(role['id'], role_ids)
    def test_create_trust_expires_bad(self):
        """Unparseable expires_at values raise ValidationTimeStampError."""
        self.assertRaises(exception.ValidationTimeStampError,
                          self.create_trust, self.sample_data,
                          self.trustor['name'], expires_at="bad")
        self.assertRaises(exception.ValidationTimeStampError,
                          self.create_trust, self.sample_data,
                          self.trustor['name'], expires_at="")
        self.assertRaises(exception.ValidationTimeStampError,
                          self.create_trust, self.sample_data,
                          self.trustor['name'], expires_at="Z")
    def test_create_trust_without_project_id(self):
        """Verify that trust can be created without project id and
        token can be generated with that trust.
        """
        unscoped_token = self.get_unscoped_token(self.trustor['name'])
        context = self._create_auth_context(
            unscoped_token['access']['token']['id'])
        self.sample_data['project_id'] = None
        self.sample_data['roles'] = []
        new_trust = self.trust_controller.create_trust(
            context, trust=self.sample_data)['trust']
        self.assertEqual(self.trustor['id'], new_trust['trustor_user_id'])
        self.assertEqual(self.trustee['id'], new_trust['trustee_user_id'])
        self.assertIs(new_trust['impersonation'], True)
        auth_response = self.fetch_v2_token_from_trust(new_trust)
        token_user = auth_response['access']['user']
        self.assertEqual(token_user['id'], new_trust['trustor_user_id'])
    def test_get_trust(self):
        """A created trust can be fetched back with matching users/roles."""
        unscoped_token = self.get_unscoped_token(self.trustor['name'])
        context = {'token_id': unscoped_token['access']['token']['id'],
                   'host_url': HOST_URL}
        new_trust = self.trust_controller.create_trust(
            context, trust=self.sample_data)['trust']
        trust = self.trust_controller.get_trust(context,
                                                new_trust['id'])['trust']
        self.assertEqual(self.trustor['id'], trust['trustor_user_id'])
        self.assertEqual(self.trustee['id'], trust['trustee_user_id'])
        role_ids = [self.role_browser['id'], self.role_member['id']]
        for role in new_trust['roles']:
            self.assertIn(role['id'], role_ids)
    def test_create_trust_no_impersonation(self):
        """Without impersonation the trust token acts as the trustee."""
        new_trust = self.create_trust(self.sample_data, self.trustor['name'],
                                      expires_at=None, impersonation=False)
        self.assertEqual(self.trustor['id'], new_trust['trustor_user_id'])
        self.assertEqual(self.trustee['id'], new_trust['trustee_user_id'])
        self.assertIs(new_trust['impersonation'], False)
        auth_response = self.fetch_v2_token_from_trust(new_trust)
        token_user = auth_response['access']['user']
        self.assertEqual(token_user['id'], new_trust['trustee_user_id'])
        # TODO(ayoung): Endpoints
    def test_create_trust_impersonation(self):
        """With impersonation the trust token acts as the trustor."""
        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
        self.assertEqual(self.trustor['id'], new_trust['trustor_user_id'])
        self.assertEqual(self.trustee['id'], new_trust['trustee_user_id'])
        self.assertIs(new_trust['impersonation'], True)
        auth_response = self.fetch_v2_token_from_trust(new_trust)
        token_user = auth_response['access']['user']
        self.assertEqual(token_user['id'], new_trust['trustor_user_id'])
    def test_token_from_trust_wrong_user_fails(self):
        """Only the trustee may consume the trust; the trustor cannot."""
        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
        request_body = self.build_v2_token_request('FOO', 'foo2', new_trust)
        self.assertRaises(exception.Forbidden, self.controller.authenticate,
                          {}, request_body)
    def test_token_from_trust_wrong_project_fails(self):
        """The trust token is scoped to the trust's project only."""
        for assigned_role in self.assigned_roles:
            self.assignment_api.add_role_to_user_and_project(
                self.trustor['id'], self.tenant_baz['id'], assigned_role)
        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
        request_body = self.build_v2_token_request('TWO', 'two2', new_trust,
                                                   self.tenant_baz['id'])
        self.assertRaises(exception.Forbidden, self.controller.authenticate,
                          {}, request_body)
    def fetch_v2_token_from_trust(self, trust):
        """Consume *trust* as the trustee; return the v2 auth response."""
        request_body = self.build_v2_token_request('TWO', 'two2', trust)
        auth_response = self.controller.authenticate({}, request_body)
        return auth_response
    def fetch_v3_token_from_trust(self, trust, trustee):
        """Consume *trust* via the v3 API; return the v3 auth response."""
        # First get a plain v3 project-scoped token as the trustee ...
        v3_password_data = {
            'identity': {
                "methods": ["password"],
                "password": {
                    "user": {
                        "id": trustee["id"],
                        "password": trustee["password"]
                    }
                }
            },
            'scope': {
                'project': {
                    'id': self.tenant_baz['id']
                }
            }
        }
        auth_response = (self.auth_v3_controller.authenticate_for_token
                         ({'environment': {},
                           'query_string': {}},
                          v3_password_data))
        token = auth_response.headers['X-Subject-Token']
        # ... then exchange it for a trust-scoped token.
        v3_req_with_trust = {
            "identity": {
                "methods": ["token"],
                "token": {"id": token}},
            "scope": {
                "OS-TRUST:trust": {"id": trust['id']}}}
        token_auth_response = (self.auth_v3_controller.authenticate_for_token
                               ({'environment': {},
                                 'query_string': {}},
                                v3_req_with_trust))
        return token_auth_response
    def test_create_v3_token_from_trust(self):
        """A v3 trust token carries trust metadata and delegated roles."""
        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
        auth_response = self.fetch_v3_token_from_trust(new_trust, self.trustee)
        trust_token_user = auth_response.json['token']['user']
        self.assertEqual(self.trustor['id'], trust_token_user['id'])
        trust_token_trust = auth_response.json['token']['OS-TRUST:trust']
        self.assertEqual(trust_token_trust['id'], new_trust['id'])
        self.assertEqual(self.trustor['id'],
                         trust_token_trust['trustor_user']['id'])
        self.assertEqual(self.trustee['id'],
                         trust_token_trust['trustee_user']['id'])
        trust_token_roles = auth_response.json['token']['roles']
        self.assertEqual(2, len(trust_token_roles))
    def test_v3_trust_token_get_token_fails(self):
        """A trust-scoped token cannot be used to obtain another token."""
        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
        auth_response = self.fetch_v3_token_from_trust(new_trust, self.trustee)
        trust_token = auth_response.headers['X-Subject-Token']
        v3_token_data = {'identity': {
            'methods': ['token'],
            'token': {'id': trust_token}
        }}
        self.assertRaises(
            exception.Forbidden,
            self.auth_v3_controller.authenticate_for_token,
            {'environment': {},
             'query_string': {}}, v3_token_data)
    def test_token_from_trust(self):
        """The trust token carries only the roles delegated by the trust."""
        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
        auth_response = self.fetch_v2_token_from_trust(new_trust)
        self.assertIsNotNone(auth_response)
        self.assertEqual(2,
                         len(auth_response['access']['metadata']['roles']),
                         "user_foo has three roles, but the token should"
                         " only get the two roles specified in the trust.")
    def assert_token_count_for_trust(self, trust, expected_value):
        """Assert the trustee holds *expected_value* tokens for *trust*."""
        tokens = self.token_provider_api._persistence._list_tokens(
            self.trustee['id'], trust_id=trust['id'])
        token_count = len(tokens)
        self.assertEqual(expected_value, token_count)
    def test_delete_tokens_for_user_invalidates_tokens_from_trust(self):
        """Deleting the trustee's tokens also removes trust tokens."""
        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
        self.assert_token_count_for_trust(new_trust, 0)
        self.fetch_v2_token_from_trust(new_trust)
        self.assert_token_count_for_trust(new_trust, 1)
        self.token_provider_api._persistence.delete_tokens_for_user(
            self.trustee['id'])
        self.assert_token_count_for_trust(new_trust, 0)
    def test_token_from_trust_cant_get_another_token(self):
        """A v2 trust token cannot be exchanged for a further token."""
        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
        auth_response = self.fetch_v2_token_from_trust(new_trust)
        trust_token_id = auth_response['access']['token']['id']
        request_body = _build_user_auth(token={'id': trust_token_id},
                                        tenant_id=self.tenant_bar['id'])
        self.assertRaises(
            exception.Forbidden,
            self.controller.authenticate, {}, request_body)
    def test_delete_trust_revokes_token(self):
        """Deleting a trust revokes tokens that were issued through it."""
        unscoped_token = self.get_unscoped_token(self.trustor['name'])
        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
        context = self._create_auth_context(
            unscoped_token['access']['token']['id'])
        self.fetch_v2_token_from_trust(new_trust)
        trust_id = new_trust['id']
        tokens = self.token_provider_api._persistence._list_tokens(
            self.trustor['id'],
            trust_id=trust_id)
        self.assertEqual(1, len(tokens))
        self.trust_controller.delete_trust(context, trust_id=trust_id)
        tokens = self.token_provider_api._persistence._list_tokens(
            self.trustor['id'],
            trust_id=trust_id)
        self.assertEqual(0, len(tokens))
    def test_token_from_trust_with_no_role_fails(self):
        """Trust auth fails once the trustor loses the delegated roles."""
        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
        for assigned_role in self.assigned_roles:
            self.assignment_api.remove_role_from_user_and_project(
                self.trustor['id'], self.tenant_bar['id'], assigned_role)
        request_body = self.build_v2_token_request('TWO', 'two2', new_trust)
        self.assertRaises(
            exception.Forbidden,
            self.controller.authenticate, {}, request_body)
    def test_expired_trust_get_token_fails(self):
        """An expired trust can no longer be consumed."""
        expiry = "1999-02-18T10:10:00Z"
        new_trust = self.create_trust(self.sample_data, self.trustor['name'],
                                      expiry)
        request_body = self.build_v2_token_request('TWO', 'two2', new_trust)
        self.assertRaises(
            exception.Forbidden,
            self.controller.authenticate, {}, request_body)
    def test_token_from_trust_with_wrong_role_fails(self):
        """Trust auth fails if the trustor only has non-delegated roles."""
        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
        self.assignment_api.add_role_to_user_and_project(
            self.trustor['id'],
            self.tenant_bar['id'],
            self.role_other['id'])
        for assigned_role in self.assigned_roles:
            self.assignment_api.remove_role_from_user_and_project(
                self.trustor['id'], self.tenant_bar['id'], assigned_role)
        request_body = self.build_v2_token_request('TWO', 'two2', new_trust)
        self.assertRaises(
            exception.Forbidden,
            self.controller.authenticate, {}, request_body)
    def test_do_not_consume_remaining_uses_when_get_token_fails(self):
        """A failed trust auth must not decrement the trust's use count."""
        trust_data = copy.deepcopy(self.sample_data)
        trust_data['remaining_uses'] = 3
        new_trust = self.create_trust(trust_data, self.trustor['name'])
        for assigned_role in self.assigned_roles:
            self.assignment_api.remove_role_from_user_and_project(
                self.trustor['id'], self.tenant_bar['id'], assigned_role)
        request_body = self.build_v2_token_request('TWO', 'two2', new_trust)
        self.assertRaises(exception.Forbidden,
                          self.controller.authenticate, {}, request_body)
        unscoped_token = self.get_unscoped_token(self.trustor['name'])
        context = self._create_auth_context(
            unscoped_token['access']['token']['id'])
        trust = self.trust_controller.get_trust(context,
                                                new_trust['id'])['trust']
        self.assertEqual(3, trust['remaining_uses'])
    def test_v2_trust_token_contains_trustor_user_id_and_impersonation(self):
        """Trust metadata appears in both the auth and validate responses."""
        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
        auth_response = self.fetch_v2_token_from_trust(new_trust)
        self.assertEqual(new_trust['trustee_user_id'],
                         auth_response['access']['trust']['trustee_user_id'])
        self.assertEqual(new_trust['trustor_user_id'],
                         auth_response['access']['trust']['trustor_user_id'])
        self.assertEqual(new_trust['impersonation'],
                         auth_response['access']['trust']['impersonation'])
        self.assertEqual(new_trust['id'],
                         auth_response['access']['trust']['id'])
        validate_response = self.controller.validate_token(
            context=dict(is_admin=True, query_string={}),
            token_id=auth_response['access']['token']['id'])
        self.assertEqual(
            new_trust['trustee_user_id'],
            validate_response['access']['trust']['trustee_user_id'])
        self.assertEqual(
            new_trust['trustor_user_id'],
            validate_response['access']['trust']['trustor_user_id'])
        self.assertEqual(
            new_trust['impersonation'],
            validate_response['access']['trust']['impersonation'])
        self.assertEqual(
            new_trust['id'],
            validate_response['access']['trust']['id'])
class TokenExpirationTest(AuthTest):
    """Verify a token's expiry is stable across validation and rescoping."""
    @mock.patch.object(timeutils, 'utcnow')
    def _maintain_token_expiration(self, mock_utcnow):
        """Token expiration should be maintained after re-auth & validation."""
        now = datetime.datetime.utcnow()
        mock_utcnow.return_value = now
        r = self.controller.authenticate(
            {},
            auth={
                'passwordCredentials': {
                    'username': self.user_foo['name'],
                    'password': self.user_foo['password']
                }
            })
        unscoped_token_id = r['access']['token']['id']
        original_expiration = r['access']['token']['expires']
        # Advance the mocked clock: validation must not alter the expiry.
        mock_utcnow.return_value = now + datetime.timedelta(seconds=1)
        r = self.controller.validate_token(
            dict(is_admin=True, query_string={}),
            token_id=unscoped_token_id)
        self.assertEqual(original_expiration, r['access']['token']['expires'])
        # Rescoping the unscoped token must keep the original expiry.
        mock_utcnow.return_value = now + datetime.timedelta(seconds=2)
        r = self.controller.authenticate(
            {},
            auth={
                'token': {
                    'id': unscoped_token_id,
                },
                'tenantId': self.tenant_bar['id'],
            })
        scoped_token_id = r['access']['token']['id']
        self.assertEqual(original_expiration, r['access']['token']['expires'])
        # Validating the scoped token still reports the original expiry.
        mock_utcnow.return_value = now + datetime.timedelta(seconds=3)
        r = self.controller.validate_token(
            dict(is_admin=True, query_string={}),
            token_id=scoped_token_id)
        self.assertEqual(original_expiration, r['access']['token']['expires'])
    def test_maintain_uuid_token_expiration(self):
        """Run the expiry-maintenance check against the UUID provider."""
        self.config_fixture.config(group='signing', token_format='UUID')
        self._maintain_token_expiration()
class AuthCatalog(tests.SQLDriverOverrides, AuthTest):
    """Tests for the catalog provided in the auth response."""
    def config_files(self):
        """Append the SQL backend config; it supports disabled endpoints."""
        config_files = super(AuthCatalog, self).config_files()
        # We need to use a backend that supports disabled endpoints, like the
        # SQL backend.
        config_files.append(tests.dirs.tests_conf('backend_sql.conf'))
        return config_files
    def _create_endpoints(self):
        """Create enabled/disabled services and endpoints.

        Returns the only endpoint that should survive catalog filtering:
        the enabled public endpoint of the enabled service.
        """
        def create_region(**kwargs):
            ref = {'id': uuid.uuid4().hex}
            ref.update(kwargs)
            self.catalog_api.create_region(ref)
            return ref
        def create_endpoint(service_id, region, **kwargs):
            id_ = uuid.uuid4().hex
            ref = {
                'id': id_,
                'interface': 'public',
                'region_id': region,
                'service_id': service_id,
                'url': 'http://localhost/%s' % uuid.uuid4().hex,
            }
            ref.update(kwargs)
            self.catalog_api.create_endpoint(id_, ref)
            return ref
        # Create a service for use with the endpoints.
        def create_service(**kwargs):
            id_ = uuid.uuid4().hex
            ref = {
                'id': id_,
                'name': uuid.uuid4().hex,
                'type': uuid.uuid4().hex,
            }
            ref.update(kwargs)
            self.catalog_api.create_service(id_, ref)
            return ref
        enabled_service_ref = create_service(enabled=True)
        disabled_service_ref = create_service(enabled=False)
        region = create_region()
        # Create endpoints: one fully enabled, one disabled endpoint on the
        # enabled service, and one endpoint on the disabled service.
        enabled_endpoint_ref = create_endpoint(
            enabled_service_ref['id'], region['id'])
        create_endpoint(
            enabled_service_ref['id'], region['id'], enabled=False,
            interface='internal')
        create_endpoint(
            disabled_service_ref['id'], region['id'])
        return enabled_endpoint_ref
    def test_auth_catalog_disabled_endpoint(self):
        """On authenticate, get a catalog that excludes disabled endpoints."""
        endpoint_ref = self._create_endpoints()
        # Authenticate
        body_dict = _build_user_auth(
            username='FOO',
            password='foo2',
            tenant_name="BAR")
        token = self.controller.authenticate({}, body_dict)
        # Check the catalog
        self.assertEqual(1, len(token['access']['serviceCatalog']))
        endpoint = token['access']['serviceCatalog'][0]['endpoints'][0]
        self.assertEqual(
            1, len(token['access']['serviceCatalog'][0]['endpoints']))
        exp_endpoint = {
            'id': endpoint_ref['id'],
            'publicURL': endpoint_ref['url'],
            'region': endpoint_ref['region_id'],
        }
        self.assertEqual(exp_endpoint, endpoint)
    def test_validate_catalog_disabled_endpoint(self):
        """On validate, get back a catalog that excludes disabled endpoints."""
        endpoint_ref = self._create_endpoints()
        # Authenticate
        body_dict = _build_user_auth(
            username='FOO',
            password='foo2',
            tenant_name="BAR")
        token = self.controller.authenticate({}, body_dict)
        # Validate
        token_id = token['access']['token']['id']
        validate_ref = self.controller.validate_token(
            dict(is_admin=True, query_string={}),
            token_id=token_id)
        # Check the catalog of the VALIDATION response.  The previous
        # version asserted the length checks against `token` (the auth
        # response), so the validated catalog's shape was never verified.
        self.assertEqual(1, len(validate_ref['access']['serviceCatalog']))
        endpoint = validate_ref['access']['serviceCatalog'][0]['endpoints'][0]
        self.assertEqual(
            1, len(validate_ref['access']['serviceCatalog'][0]['endpoints']))
        exp_endpoint = {
            'id': endpoint_ref['id'],
            'publicURL': endpoint_ref['url'],
            'region': endpoint_ref['region_id'],
        }
        self.assertEqual(exp_endpoint, endpoint)
class NonDefaultAuthTest(tests.TestCase):
    """Registration of authentication methods beyond the built-in set."""
    def test_add_non_default_auth_method(self):
        """A custom method listed in [auth]/methods gets a CONF option."""
        self.config_fixture.config(group='auth',
                                   methods=['password', 'token', 'custom'])
        config.setup_authentication()
        # setup_authentication() must have registered a config option for
        # the non-default 'custom' method.
        self.assertTrue(hasattr(CONF.auth, 'custom'))
|
|
"""
Copyright 2016 Deepgram
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import numpy
from . import Layer, ParsingError
logger = logging.getLogger(__name__)
###############################################################################
class Recurrent(Layer): # pylint: disable=too-few-public-methods
""" A recurrent neural network.
# Properties
sequence: bool.
size: int.
bidirectional: bool.
merge: one of (multiply, add, concat, average)
type: one of (lstm, gru, sru)
# Example
```
recurrent:
size: 32
sequence: yes
bidirectional: yes
merge: average
type: lstm
```
"""
MERGE_TYPES = ('multiply', 'add', 'concat', 'average')
RNN_TYPES = ('lstm', 'gru', 'sru')
###########################################################################
def __init__(self, *args, **kwargs):
""" Creates a new recurrent layer.
"""
super().__init__(*args, **kwargs)
self.type = None
self.size = None
self.sequence = None
self.bidirectional = None
self.merge = None
###########################################################################
    def _parse(self, engine):
        """ Parses out the recurrent layer.

            Evaluates and validates each supported specification key
            (sequence, bidirectional, merge, type, size, outer_activation),
            raising ParsingError on a bad type or value.
        """
        # 'sequence': return the whole output sequence rather than just the
        # final output.  Defaults to True.
        self.sequence = engine.evaluate(self.args.get('sequence', True),
                                        recursive=True)
        if not isinstance(self.sequence, bool):
            raise ParsingError('Wrong type for "sequence" argument in '
                               'recurrent layer. Expected bool, received: {}'
                               .format(self.sequence))
        # 'bidirectional': run the RNN in both directions.  Defaults to False.
        self.bidirectional = engine.evaluate(
            self.args.get('bidirectional', False),
            recursive=True
        )
        if not isinstance(self.bidirectional, bool):
            raise ParsingError('Wrong type for "bidirectional" argument in '
                               'recurrent layer. Expected bool, received: {}'
                               .format(self.bidirectional))
        # 'merge': how the forward/backward outputs are combined.  Only valid
        # (and defaulted to 'average') when bidirectional is enabled.
        self.merge = engine.evaluate(self.args.get('merge'), recursive=True)
        if not self.bidirectional:
            if self.merge is not None:
                raise ParsingError('Having a "merge" strategy in a '
                                   '"recurrent" layer only makes sense for bidirectional '
                                   'RNNs.')
        else:
            if self.merge is None:
                self.merge = 'average'
            if not isinstance(self.merge, str):
                raise ParsingError('Wrong type for "merge" argument in '
                                   'recurrent layer. Expected one of: {}. Received: {}'
                                   .format(', '.join(Recurrent.MERGE_TYPES), self.merge)
                                   )
            # Merge strategies are matched case-insensitively.
            self.merge = self.merge.lower()
            if self.merge not in Recurrent.MERGE_TYPES:
                raise ParsingError('Bad value for "merge" argument in '
                                   'recurrent layer. Expected one of: {}. Received: {}'
                                   .format(', '.join(Recurrent.MERGE_TYPES), self.merge)
                                   )
        # 'type': the RNN flavor.  Defaults to 'gru'; matched
        # case-insensitively against RNN_TYPES.
        self.type = engine.evaluate(self.args.get('type', 'gru'),
                                    recursive=True)
        if not isinstance(self.type, str):
            raise ParsingError('Wrong type for "type" argument in recurrent '
                               'layer. Expected one of: {}. Received: {}'.format(
                                   ', '.join(Recurrent.RNN_TYPES), self.type
                               ))
        self.type = self.type.lower()
        if self.type not in Recurrent.RNN_TYPES:
            raise ParsingError('Bad value for "type" argument in recurrent '
                               'layer. Expected one of: {}. Received: {}'.format(
                                   ', '.join(Recurrent.RNN_TYPES), self.type
                               ))
        # 'size': the number of output units.  Required; must be an integer.
        self.size = engine.evaluate(self.args.get('size'), recursive=True)
        if not isinstance(self.size, int):
            raise ParsingError('Bad or missing value for "size" argument in '
                               'recurrent layer. Expected an integer. Received: {}'
                               .format(self.size))
        # 'outer_activation': optional activation override; None means the
        # backend's default is used at build time.
        if 'outer_activation' in self.args:
            self.activation = engine.evaluate(self.args['outer_activation'])
        else:
            self.activation = None
###########################################################################
def _build(self, model):
    """ Instantiates the layer with the given backend.

    model: the model being assembled; supplies the backend object.

    Yields one or more callables which, applied to the upstream tensor,
    produce this layer's output tensor.
    """
    backend = model.get_backend()
    if backend.get_name() == 'keras':
        # Keras 1 and Keras 2 expose the recurrent layers from different
        # module paths.
        if backend.keras_version() == 1:
            import keras.layers as L  # pylint: disable=import-error
        else:
            import keras.layers.recurrent as L  # pylint: disable=import-error
        if self.type == 'sru':
            raise ValueError('SRU is only supported on PyTorch.')
        # Map the parsed RNN type onto the Keras layer class.
        func = {
            'lstm' : L.LSTM,
            'gru' : L.GRU
        }.get(self.type)
        if func is None:
            raise ValueError('Unhandled RNN type: {}. This is a bug.'
                .format(self.type))
        # Keras 1 called the hidden size 'output_dim'; Keras 2 renamed it
        # to 'units'.
        if backend.keras_version() == 1:
            size_key = 'output_dim'
        else:
            size_key = 'units'
        kwargs = {
            'activation' : self.activation or 'relu',
            'return_sequences' : self.sequence,
            'go_backwards' : False,
            size_key : self.size,
            'trainable' : not self.frozen
        }
        if self.bidirectional:
            # Build two RNNs (forward and backward) and merge their outputs.
            kwargs['name'] = self.name + '_fwd'
            if self.merge in ('concat', ):
                # Concatenation doubles the feature size, so each direction
                # gets half; an odd size cannot be split evenly.
                if kwargs[size_key] % 2 != 0:
                    # NOTE(review): 'logger' is presumably a module-level
                    # logging.Logger defined above this excerpt -- confirm.
                    logger.warning('Recurrent layer "%s" has an odd '
                        'number for "size", but has a concat-type merge '
                        'strategy. We are going to reduce its size by '
                        'one.', self.name)
                    kwargs[size_key] -= 1
                kwargs[size_key] //= 2
            forward = func(**kwargs)
            # Reuse the same kwargs for the reversed direction.
            kwargs['go_backwards'] = True
            kwargs['name'] = self.name + '_rev'
            backward = func(**kwargs)
            def merge(tensor):
                """ Returns a bidirectional RNN.
                """
                import keras.layers as L  # pylint: disable=import-error
                if backend.keras_version() == 1:
                    # Keras 1: single merge() entry point with string modes;
                    # only 'concat' takes an extra axis keyword.
                    return L.merge(
                        [forward(tensor), backward(tensor)],
                        mode={
                            'multiply' : 'mul',
                            'add' : 'sum',
                            'concat' : 'concat',
                            'average' : 'ave'
                        }.get(self.merge),
                        name=self.name,
                        **{
                            'concat' : {'concat_axis' : -1}
                        }.get(self.merge, {})
                    )
                else:
                    # Keras 2: one functional helper per merge mode.
                    # NOTE(review): 'axis' is passed to all four helpers, but
                    # only concatenate() documents an 'axis' kwarg -- confirm
                    # the non-concat merges work on Keras 2.
                    func = {
                        'multiply' : L.multiply,
                        'add' : L.add,
                        'concat' : L.concatenate,
                        'average' : L.average
                    }.get(self.merge)
                    return func(
                        [forward(tensor), backward(tensor)],
                        axis=-1,
                        name=self.name
                    )
            yield merge
        else:
            # Unidirectional: a single Keras layer is the whole block.
            kwargs['name'] = self.name
            yield func(**kwargs)
    elif backend.get_name() == 'pytorch':
        # pylint: disable=import-error
        import torch.nn as nn
        from kur.backend.pytorch.modules import swap_batch_dimension
        # SRU is an optional third-party dependency; import lazily.
        if self.type == 'sru':
            from sru import SRU
            _SRU = SRU
        else:
            _SRU = None
        # pylint: enable=import-error
        func = {
            'lstm' : nn.LSTM,
            'gru' : nn.GRU,
            'sru' : _SRU
        }.get(self.type)
        if func is None:
            raise ValueError('Unhandled RNN type: {}. This is a bug.'
                .format(self.type))
        # PyTorch bidirectional RNNs only concatenate the two directions.
        if self.bidirectional and self.merge != 'concat':
            raise ValueError('PyTorch backend currently only supports '
                '"concat" mode for bidirectional RNNs.')
        if self.activation:
            raise ValueError('PyTorch backend currently only supports '
                'the default "outer_activation" value for RNNs.')
        def connect(inputs):
            """ Constructs the RNN layer.
            """
            assert len(inputs) == 1
            size = self.size
            if self.bidirectional:
                # Same halving rule as the Keras concat branch above.
                if size % 2 != 0:
                    logger.warning('Recurrent layer "%s" has an odd '
                        'number for "size", but has a concat-type merge '
                        'strategy. We are going to reduce its size by '
                        'one.', self.name)
                    size -= 1
                size //= 2
            kwargs = {
                'input_size' : inputs[0]['shape'][-1],
                'hidden_size' : size,
                'num_layers' : 1,
                'bidirectional' : self.bidirectional
            }
            if self.type == 'sru':
                kwargs.update({
                    'use_tanh' : 0
                })
            else:
                # nn.LSTM/nn.GRU accept batch-first input directly.
                kwargs.update({
                    'batch_first' : True,
                    'bias' : True
                })
            def layer_func(layer, *inputs):
                """ Applies the RNN
                """
                # SRU has no batch_first option, so swap to its expected
                # layout before the call and back afterwards (presumably
                # time-major -- TODO confirm against SRU docs).
                if self.type == 'sru':
                    inputs = (swap_batch_dimension(inputs[0]), )
                # Trailing None is the initial hidden state (defaults).
                result, _ = layer(*(inputs + (None, )))
                if self.type == 'sru':
                    result = swap_batch_dimension(result)
                if not self.sequence:
                    # Keep only the last timestep.
                    return result[:, -1]
                return result
            return {
                'shape' : self.shape([inputs[0]['shape']]),
                'layer' : model.data.add_layer(
                    self.name,
                    func(**kwargs),
                    func=layer_func,
                    frozen=self.frozen
                )(inputs[0]['layer'])
            }
        yield connect
    else:
        raise ValueError(
            'Unknown or unsupported backend: {}'.format(backend))
###########################################################################
def shape(self, input_shapes):
    """ Computes the output shape of this layer.

    input_shapes: list with exactly one input shape tuple.

    Returns the output shape: the full input shape with the feature axis
    replaced by self.size when sequences are returned, otherwise just
    (self.size, ).

    Raises ValueError when more than one input shape is supplied.
    """
    if len(input_shapes) > 1:
        raise ValueError('Recurrent layers only take a single input.')
    single_shape = input_shapes[0]
    if not self.sequence:
        # Only the final timestep is emitted: output is a flat vector.
        return (self.size, )
    # Full sequence: keep the leading axes, resize the feature axis.
    return single_shape[:-1] + (self.size, )
### EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF
|
|
import linear_env_small as linear_env
import sim_env
from actor import Actor
from critic import Critic
from replay_buffer import ReplayBuffer
import numpy as np
import tensorflow as tf
import keras.backend as kbck
import json
import time
import argparse
import matplotlib.pylab as plt
import os.path
def ou(x, mu, theta, sigma):
    """One Ornstein-Uhlenbeck step: mean-reverting drift plus Gaussian noise.

    x:     current state vector (1-D array-like).
    mu:    long-run mean the process reverts towards.
    theta: mean-reversion rate.
    sigma: noise scale.
    """
    drift = theta * (mu - x)
    diffusion = sigma * np.random.randn(np.shape(x)[0])
    return drift + diffusion
def simulate(control, swmm, flows):
    """Train a DDPG controller on the linear drainage environment, or run a
    free (uncontrolled) simulation and plot the resulting volumes.

    control: truthy -> run the DDPG training loop; falsy -> free dynamics.
    swmm:    truthy -> SWMM co-simulation (not supported; returns early).
    flows:   forwarded to the Actor network and to env.step().

    Side effects: saves reward history and network weights under
    ./actors_small and ./critics_small (control mode), or shows a
    matplotlib figure (free mode).
    """
    best_reward = -1*np.inf
    # DDPG hyper-parameters
    BUFFER_SIZE = 100000
    BATCH_SIZE = 120
    GAMMA = 0.99
    TAU = 0.01       # Target Network HyperParameters
    LRA = 0.0001     # Learning rate for Actor
    LRC = 0.001      # Lerning rate for Critic
    action_dim = 4
    state_dim = 5
    max_steps = 6000
    np.random.seed(9501)
    EXPLORE = 100000.
    episode_count = 1000
    done = False
    # NOTE(review): 'step' is never incremented, so the final "Total Step"
    # printout is always 0.
    step = 0
    epsilon = 1
    if swmm:
        # FIX: SWMM co-simulation is not implemented. Return early instead
        # of falling through to code that uses names (Hs, x, d, ...) defined
        # only in the non-SWMM branch, which raised NameError before.
        print("No support")
        return
    # Constants for the linear environment
    Hs = 1800
    A1 = 0.0020
    mu1 = 250
    sigma1 = 70
    A2 = 0.0048
    mu2 = 250
    sigma2 = 70
    dt = 1
    x = np.arange(Hs)
    d = np.zeros((2, Hs))
    if control:
        # Tensorflow GPU optimization
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        kbck.set_session(sess)
        # Actor, critic and replay buffer creation
        actor = Actor(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRA, flows)
        critic = Critic(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRC)
        buff = ReplayBuffer(BUFFER_SIZE)
        reward_hist = []
        for i in range(episode_count):
            print("Episode : " + str(i) + " Replay Buffer " + str(buff.count()))
            # Randomly drift the two inflow Gaussians each episode so the
            # controller sees varying disturbances.
            A1 += 0.0004*np.random.rand()
            mu1 += 50*np.random.rand()
            sigma1 += 14*np.random.rand()
            A2 += 0.00096*np.random.rand()
            mu2 += 50*np.random.rand()
            sigma2 += 14*np.random.rand()
            d[0, :] = A1*np.exp((-1*(x-mu1)**2)/(2*sigma1**2))
            d[1, :] = A2*np.exp((-1*(x-mu2)**2)/(2*sigma2**2))
            vref = np.zeros((state_dim,))
            env = linear_env.env(dt, d, vref)
            # States are normalized by the maximum volumes.
            s_t = np.divide(env.reset(), env.vmax)
            total_reward = 0.
            for j in range(max_steps):
                # Exploration noise: Ornstein-Uhlenbeck process, annealed
                # by epsilon over EXPLORE steps.
                loss = 0
                epsilon -= 1.0 / EXPLORE
                a_t = np.zeros([1, action_dim])
                noise_t = np.zeros([1, action_dim])
                a_t_original = actor.munet.predict(s_t.reshape(1, s_t.shape[0]))
                noise_t[0, :] = max(epsilon, 0) * ou(a_t_original[0, :], 0.5, 1, 1.5)
                a_t[0] = a_t_original[0] + noise_t[0]
                # Act over the system and get info of the next states
                s_t1, r_t, done, _ = env.step(a_t[0], flows=flows)
                s_t1 = np.divide(s_t1, env.vmax)
                # Add transition to the replay buffer
                buff.add(s_t, a_t[0], r_t, s_t1, done)
                # Sample a batch and unpack it column-wise
                batch = buff.getBatch(BATCH_SIZE)
                states = np.asarray([e[0] for e in batch])
                actions = np.asarray([e[1] for e in batch])
                rewards = np.asarray([e[2] for e in batch])
                next_states = np.asarray([e[3] for e in batch])
                dones = np.asarray([e[4] for e in batch])
                # Get estimated q-values of the pair (next_state, mu(next_state))
                actions_next = actor.target_munet.predict(next_states)
                target_q_values = critic.target_qnet.predict([next_states, actions_next])
                # Bellman targets: terminal transitions get the bare reward.
                y_t = np.zeros(np.shape(actions))
                for k in range(len(batch)):
                    if dones[k]:
                        y_t[k] = rewards[k]
                    else:
                        y_t[k] = rewards[k] + GAMMA*target_q_values[k]
                # Critic update, then actor update via the policy gradient.
                loss += critic.qnet.train_on_batch([states, actions], y_t)
                a_for_grad = actor.munet.predict(states)
                grads = critic.gradients(states, a_for_grad)
                actor.train(states, grads)
                # Soft-update both target networks (rate TAU).
                actor.target_train()
                critic.target_train()
                # NOTE(review): accumulates GAMMA*r_t (not GAMMA**j * r_t);
                # kept as-is to preserve the original reward bookkeeping.
                total_reward = total_reward + GAMMA*r_t
                s_t = s_t1
                if j % 100 == 0:
                    print("Episode", i, "Step", j, "Reward", r_t, "Loss", loss)
                if done:
                    break
            reward_hist.append(total_reward)
            np.save("reward_small_history_flows_"+str(flows).lower()+".npy", np.array(reward_hist))
            # Periodic checkpoint every 20 episodes.
            if i % 20 == 0:
                print("Saving the networks...")
                actor.munet.save_weights("./actors_small/anetwork_flows_"+str(flows).lower()+"_it_"+str(i)+".h5", overwrite=True)
                critic.qnet.save_weights("./critics_small/cnetwork_flows_"+str(flows).lower()+"_it_"+str(i)+".h5", overwrite=True)
            # Keep the best-performing networks separately.
            if total_reward > best_reward:
                print("Saving Best Actor...")
                np.save("best_reward"+"_flows_"+str(flows)+".npy", np.array(total_reward))
                actor.munet.save_weights("./actors_small/best_anetwork_flows_"+str(flows).lower()+".h5", overwrite=True)
                critic.qnet.save_weights("./critics_small/best_cnetwork_flows_"+str(flows).lower()+".h5", overwrite=True)
                best_reward = total_reward
            print("TOTAL REWARD @ " + str(i) + "-th Episode : Reward " + str(total_reward))
            print("Total Step: " + str(step))
            print("")
        print("Finish.")
    else:
        # Free dynamics: one run with the nominal Gaussian inflows.
        d[0, :] = A1*np.exp((-1*(x-mu1)**2)/(2*sigma1**2))
        d[1, :] = A2*np.exp((-1*(x-mu2)**2)/(2*sigma2**2))
        vref = np.zeros((state_dim,))
        env = linear_env.env(dt, d, vref)
        resv, resf, resu = env.free_sim()
        # Plot styling
        font_labels = 16
        font_legends = 22
        ticksize = 16
        width = 2.5
        f, axarr = plt.subplots(nrows=1, ncols=2, figsize=(14, 6), sharex=True)
        # FIX: the original used np.matlib.repmat, which raises
        # AttributeError unless 'numpy.matlib' is imported explicitly
        # (this module never does). np.tile is the equivalent call.
        resv_norm = np.divide(np.transpose(resv), np.tile(env.vmax, (Hs, 1)))
        resu = np.transpose(np.asarray(resu))
        # Plot volume results (normalized to %vmax).
        lines = axarr[0].plot(x, resv_norm[:, :3], linewidth=width)
        axarr[0].legend(lines, list(map(lambda k: "v"+str(k+1), range(3))), prop={'size': font_legends})
        axarr[0].set_title("Volumes - Tanks 1 to 3", fontsize=font_labels)
        axarr[0].set_xlabel("Times(s)", fontsize=font_labels)
        axarr[0].set_ylabel("Volume(%vmax)", fontsize=font_labels)
        axarr[0].tick_params(labelsize=ticksize)
        lines = axarr[1].plot(x, resv_norm[:, 3:], linewidth=width)
        axarr[1].legend(lines, list(map(lambda k: "v"+str(k+1) if k+1 != 5 else "vT", range(3, 5))), prop={'size': font_legends})
        axarr[1].set_title("Volumes - Tank 4 and Storm Tank", fontsize=font_labels)
        axarr[1].set_xlabel("Times(s)", fontsize=font_labels)
        axarr[1].tick_params(labelsize=ticksize)
        plt.tight_layout()
        plt.show()
def rainfile():
    """Generate two randomly perturbed Gaussian runoff profiles and write
    them to 'swmm/runoff1.dat' and 'swmm/runoff2.dat'.

    Each output line is "HH:MM <value>", subsampled to one record per
    minute of the 1800 s horizon. The 'swmm/' directory must already
    exist. Uses module-level numpy (np); the unused math/matplotlib
    imports of the original were removed.
    """
    # Gaussian inflow profiles with random jitter on amplitude/mean/spread.
    A1 = 0.008 + 0.0008*np.random.rand(); mu1 = 500+50*np.random.rand(); sigma1 = 250+25*np.random.rand()
    A2 = 0.0063 + 0.00063*np.random.rand(); mu2 = 500+50*np.random.rand(); sigma2 = 250+25*np.random.rand()
    dt = 1
    Hs = 1800
    x = np.arange(0, Hs, dt)
    d = [[], []]
    d[0] = A1*np.exp((-(x-mu1)**2)/(2*sigma1**2))  # Node 1 - left
    d[1] = A2*np.exp((-(x-mu2)**2)/(2*sigma2**2))  # Node 2 - right
    def secs_to_hour(secs_convert):
        """Format a second count as zero-padded HH:MM (seconds dropped)."""
        hour = secs_convert//3600
        mins = (secs_convert % 3600)//60
        return '{h:02d}:{m:02d}'.format(h=hour, m=mins)
    secs_hour_vec = np.vectorize(secs_to_hour)
    for k in (1, 2):
        with open('swmm/runoff%d.dat' % k, 'w') as f:
            for i, (t, val) in enumerate(zip(secs_hour_vec(x), d[k-1])):
                # Subsample to one record per minute.
                if i % 60 == 0:
                    f.write(t + " " + str(val) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--control", type=int, choices=[0, 1], help="Choose between control(1) or free dynamics(0)")
    parser.add_argument("-s", "--swmm", type=int, choices=[0, 1], help="Choose between a simulation with swmm(1) or not(0)")
    parser.add_argument("-f", "--flow", type=int, choices=[0, 1], help="Choose between a simulation with flows(1) or not(0)")
    args = parser.parse_args()
    if args.flow == 1 and args.swmm == 1:
        print("Conflicting option flow 1 and swmm 1")
    else:
        # FIX: the elapsed-time end stamp was previously bound to the name
        # 'tf', shadowing the tensorflow module imported at the top of the
        # file. process_time() measures CPU time of this process only.
        t_start = time.process_time()
        simulate(control=args.control, swmm=args.swmm, flows=args.flow)
        t_end = time.process_time()
        print("Elapsed time: ", t_end - t_start)
|
|
# -*- coding: utf-8 -*-
'''
Aggregation plug-in to copy all FCS files under a specified FLOW element
to the user folder or to the session workspace for download.
@author: Aaron Ponti
'''
from ch.systemsx.cisd.openbis.generic.shared.api.v1.dto import SearchCriteria
from ch.systemsx.cisd.openbis.generic.shared.api.v1.dto import SearchSubCriteria
from ch.systemsx.cisd.openbis.generic.shared.api.v1.dto.SearchCriteria import MatchClause
from ch.systemsx.cisd.openbis.generic.shared.api.v1.dto.SearchCriteria import MatchClauseAttribute
from ch.systemsx.cisd.base.utilities import OSUtilities
import os
import subprocess
import sys
import re
import zipfile
import java.io.File
from ch.ethz.scu.obit.common.server.longrunning import LRCache
import uuid
from threading import Thread
import logging
def touch(full_file):
    """Create *full_file* as an empty file (truncating it if it exists)."""
    handle = open(full_file, 'w')
    handle.close()
def zip_folder(folder_path, output_path):
    """Zip the contents of an entire folder recursively. Please notice that
    empty sub-folders will NOT be included in the archive.

    folder_path: full path of the folder to archive.
    output_path: full path of the (uncompressed, allowZip64) zip to create.

    NOTE: written for Jython/Python 2 -- uses the 'except E, v' syntax.
    """
    # Note: os.path.relpath() does not exist in Jython.
    # target = os.path.relpath(folder_path, start=os.path.dirname(folder_path))
    target = folder_path[folder_path.rfind(os.sep) + 1:]
    # Simple trick to build relative paths: everything after this index in
    # an absolute path is the archive-relative part.
    root_len = folder_path.find(target)
    try:
        # Open zip file (no compression)
        zip_file = zipfile.ZipFile(output_path, 'w', zipfile.ZIP_STORED, allowZip64=True)
        # Now recurse into the folder
        for root, folders, files in os.walk(folder_path):
            # We do not process folders. This is only useful to store empty
            # folders to the archive, but 1) jython's zipfile implementation
            # throws:
            #
            #     Exception: [Errno 21] Is a directory <directory_name>
            #
            # when trying to write a directory to a zip file (in contrast to
            # Python's implementation) and 2) oBIT does not export empty
            # folders in the first place.
            # Build the relative directory path (current root)
            relative_dir_path = os.path.abspath(root)[root_len:]
            # If a folder only contains a subfolder, we disrupt the hierarchy,
            # unless we add a file.
            # NOTE(review): this writes a literal '~' placeholder file into
            # the SOURCE tree being archived and never removes it -- confirm
            # this side effect is acceptable.
            if len(files) == 0:
                touch(os.path.join(root, '~'))
                files.append('~')
            # Include all files
            for file_name in files:
                # Full file path to add
                full_file_path = os.path.join(root, file_name)
                relative_file_path = os.path.join(relative_dir_path, file_name)
                # Workaround problem with file name encoding
                full_file_path = full_file_path.encode('latin-1')
                relative_file_path = relative_file_path.encode('latin-1')
                # Write to zip
                zip_file.write(full_file_path, relative_file_path, \
                               zipfile.ZIP_STORED)
    except IOError, message:
        raise Exception(message)
    except OSError, message:
        raise Exception(message)
    except zipfile.BadZipfile, message:
        raise Exception(message)
    finally:
        # NOTE(review): if ZipFile() itself raised, 'zip_file' is unbound
        # here and this close() raises NameError -- confirm intended.
        zip_file.close()
class Mover():
"""
Takes care of organizing the files to be copied to the user folder and
performs the actual copying.
"""
def __init__(self, experimentId, experimentType, entityType, entityId,
             specimen, mode, userId, properties, logger):
    """Constructor

    experimentId  : id of the experiment (must be specified).
    experimentType: type of the experiment (e.g. "FLOW_..._EXPERIMENT").
    entityType    : type of the entity to export (prefix + suffix such as
                    _PLATE, _TUBE, _WELL, _FCSFILE, ...).
    entityId      : id of the entity to export; its code is derived from
                    the part after the last "/".
    specimen      : specimen name used to filter fcs files, or "" for no
                    filtering.
    mode          : "normal", or "zip". If mode is "normal", the files
                    will be copied to the user folder; if mode is "zip",
                    the files will be packaged into a zip file and served
                    for download via the browser.
    userId        : user id (used to build the "normal"-mode folder only;
                    NOT stored on self -- see getRelativeRootExperimentPath).
    properties    : plug-in properties (needs 'base_dir', 'export_dir').
    logger        : logger.
    """
    # Logger
    self._logger = logger
    # Store properties
    self._properties = properties
    # Experiment identifier
    self._experimentId = experimentId
    # Experiment type
    self._experimentType = experimentType
    # Set all relevant entity types for current experiment type
    # (everything before "_EXPERIMENT", e.g. "FLOW").
    self._experimentPrefix = experimentType[0:experimentType.find("_EXPERIMENT")]
    # Experiment code
    # If no / is found, _experimentCode will be the same as _experimentId
    self._experimentCode = self._experimentId[self._experimentId.rfind("/") + 1:]
    # Entity type
    self._entityType = entityType
    # Entity id
    self._entityId = entityId
    # Entity code (identifier part after the last "/")
    self._entityCode = self._entityId[self._entityId.rfind("/") + 1:]
    # Specimen name (or "")
    self._specimen = specimen
    # User folder: depending on the 'mode' settings, the user folder changes
    if mode == "normal":
        # Standard user folder: <base_dir>/<userId>/<export_dir>
        self._userFolder = os.path.join(self._properties['base_dir'], \
            userId, self._properties['export_dir'])
    elif mode == "zip":
        # Get the path to the user's Session Workspace
        # NOTE(review): 'sessionWorkspaceProvider' is presumably injected
        # by the openBIS Jython runtime; it is not defined in this file.
        sessionWorkspace = sessionWorkspaceProvider.getSessionWorkspace()
        # The user folder now will point to the Session Workspace
        self._userFolder = sessionWorkspace.absolutePath
    else:
        raise Exception("Bad value for argument 'mode' (" + mode + ")")
    # Store the mode
    self._mode = mode
    # Make sure the use folder (with export subfolder) exists and has
    # the correct permissions
    if not os.path.isdir(self._userFolder):
        self._createDir(self._userFolder)
    # Get the experiment
    # NOTE(review): 'searchService' is presumably an openBIS-injected
    # global as well -- confirm in the plug-in runtime.
    self._experiment = searchService.getExperiment(self._experimentId)
    # Get the experiment name
    self._experimentName = \
        self._experiment.getPropertyValue(self._experimentPrefix + "_EXPERIMENT_NAME")
    # Export full path in user/tmp folder
    self._rootExportPath = os.path.join(self._userFolder, self._experimentCode)
    # Experiment full path within the export path
    self._experimentPath = os.path.join(self._rootExportPath,
                                        self._experimentName)
    # Current path: this is used to keep track of the path where to copy
    # files when navigating the experiment hierarchy
    self._currentPath = ""
    # Message (in case of error)
    self._message = ""
    # Info
    self._logger.info("Export experiment with code " + \
                      self._experimentCode + " to " + \
                      str(self._userFolder))
    self._logger.info("Export mode is " + self._mode)
    # Keep track of the number of copied files
    self._numCopiedFiles = 0
# Public methods
# =========================================================================
def process(self):
"""
Uses the information stored in the Mover object to reconstruct the
structure of the experiment and copies it to the user folder. If the
processing was successful, the method returns True. Otherwise,
it returns False.
"""
# Check that the entity code is set properly (in the constructor)
if self._entityCode == '':
self._message = "Could not get entity code from identifier!"
self._logger.error(self._message)
return False
# Check that the experiment could be retrieved
if self._experiment is None:
self._message = "Could not retrieve experiment with " \
"identifier " + self._entityId + "!"
self._logger.error(self._message)
return False
# At this stage we can create the experiment folder in the user dir
# (and export root)
if not self._createRootAndExperimentFolder():
self._message = "Could not create experiment folder " + \
self._rootExportPath
self._logger.error(self._message)
return False
# Now process depending on the entity type
if self._entityType == self._experimentPrefix + "_EXPERIMENT":
# Copy all datasets contained in this experiment
return self._copyDataSetsForExperiment()
elif self._entityType == self._experimentPrefix + "_ALL_PLATES":
# Copy all datasets for all plates Experiment
return self._copyDataSetsForPlates()
elif self._entityType == self._experimentPrefix + "_TUBESET":
# Copy all datasets for the tubes in the Experiment optionally
# filtered by given specimen (if stored in self._specimen)
return self._copyDataSetsForTubes()
elif self._entityType == self._experimentPrefix + "_PLATE":
# Copy all the datasets contained in selected plate
return self._copyDataSetsForPlate()
elif self._entityType == self._experimentPrefix + "_WELL":
# Copy the datasets contained in this well
return self._copyDataSetsForWell()
elif self._entityType == self._experimentPrefix + "_TUBE":
# Copy the datasets contained in this tube
return self._copyDataSetsForTube()
elif self._entityType == self._experimentPrefix + "_FCSFILE":
# Copy current FCS file sample
return self._copyDataSetForFCSFileSample()
else:
self._message = "Unknown entity!"
self._logger.error(self._message)
return False
# Return
return True
def compressIfNeeded(self):
    """Package the exported experiment folder into a zip archive, but only
    when the Mover was created in "zip" mode; a no-op otherwise.
    """
    if self._mode != "zip":
        return
    zip_folder(self._rootExportPath, self.getZipArchiveFullPath())
def getZipArchiveFullPath(self):
"""Return the full path of the zip archive (or "" if mode was "normal").
"""
if self._mode == "zip":
return self._rootExportPath + ".zip"
return ""
def getZipArchiveFileName(self):
    """Return the file name of the zip archive without path."""
    if self._mode != "zip":
        return ""
    # Delegate path splitting to java.io.File (Jython environment).
    archive = java.io.File(self.getZipArchiveFullPath())
    return archive.getName()
def getErrorMessage(self):
"""
Return the error message (in case process() returned failure)
"""
return self._message
def getNumberOfCopiedFiles(self):
"""
Return the number of copied files.
"""
return self._numCopiedFiles
def getRelativeRootExperimentPath(self):
    """
    Return the experiment path relative to the user folder, prefixed with
    the user id (e.g. "<userId>/<export_dir>/<experiment_code>").
    """
    # NOTE(review): 'userId' is NOT an attribute or parameter here -- the
    # constructor receives a userId but never stores it, so this reads a
    # script-global 'userId'. Confirm the plug-in runtime defines it;
    # otherwise store it as self._userId in __init__ and use that.
    return userId + "/" + \
        self._rootExportPath[self._rootExportPath.rfind(self._properties['export_dir']):]
# Private methods
# =========================================================================
def _copyDataSetsForExperiment(self):
    """
    Copies all FCS files in the experiment to the user directory,
    reconstructing the sample hierarchy: plates map to subfolders, tubes
    live at the experiment root.

    Returns True on success; on failure returns False with the reason
    stored in self._message (see getErrorMessage()).
    """
    # Tubes first, then plates; short-circuit on the first failure.
    return self._copyDataSetsForTubes() and self._copyDataSetsForPlates()
def _copyDataSetsForPlate(self, plate=None):
    """
    Copy all FCS files for given plate in the experiment to the user
    directory. If the plate is not passed, it will be retrieved
    using self._entityId. The plate will map to a subfolder. Optionally,
    the fcs files may be filtered by self._specimen, if set.
    Returns True for success. In case of error, returns False and sets
    the error message in self._message -- to be retrieved with the
    getErrorMessage() method.
    """
    # Get the plate
    # NOTE(review): 'searchService' is presumably injected by the openBIS
    # Jython plug-in runtime; it is not defined in this file.
    if plate is None:
        plate = searchService.getSample(self._entityId)
    # Get plate code and name
    plateCode = plate.getCode()
    plateName = plate.getPropertyValue(self._experimentPrefix + "_PLATE_NAME")
    # Create a folder for the plate (side effect: repoints self._currentPath)
    self._currentPath = os.path.join(self._experimentPath, plateName)
    self._createDir(self._currentPath)
    # If required, create folder for the specimen
    if self._specimen != "":
        self._currentPath = os.path.join(self._currentPath, self._specimen)
        self._createDir(self._currentPath)
    # Get all datasets for the plate
    dataSets = self._getDataSetsForPlate(plateCode)
    if len(dataSets) == 0:
        self._message = "Could not retrieve datasets for plate with code " + plateCode + "."
        self._logger.error(self._message)
        return False
    # Get all fcs files for the datasets
    dataSetFiles = self._getFilesForDataSets(dataSets)
    if len(dataSetFiles) == 0:
        self._message = "Could not retrieve files for datasets from plate " + plateCode + "."
        self._logger.error(self._message)
        return False
    # Copy the files to the user folder (in the plate folder)
    for fcsFile in dataSetFiles:
        self._copyFile(fcsFile, self._currentPath)
    # Return success
    return True
def _copyDataSetsForPlates(self):
    """
    Copy all FCS files for every plate in the experiment to the user
    directory; each plate maps to its own subfolder.

    Returns True on success (including when there are no plates). On
    failure, stores an error message in self._message and returns False.
    """
    # An experiment without plates is not an error: nothing to copy.
    for plate in self._getAllPlates():
        if not self._copyDataSetsForPlate(plate):
            self._message = "Could not retrieve datasets for plate."
            self._logger.error(self._message)
            return False
    return True
def _copyDataSetsForTubes(self):
    """
    Copy all FCS files for the tubes in the experiment to the user
    directory. Tubes will be at the experiment root (or in a specimen
    subfolder when self._specimen is set).
    Returns True for success. In case of error, returns False and sets
    the error message in self._message -- to be retrieved with the
    getErrorMessage() method.
    """
    # Get the tubes (if some exist) optionally filtered by the specimen.
    # NOTE(review): _getAllTubes() is presumably defined further down in
    # this class (outside this excerpt) -- confirm.
    tubes = self._getAllTubes()
    if len(tubes) == 0:
        # No tubes is not an error condition.
        return True
    # If the specimen is set, we create a folder for it
    if self._specimen != "":
        self._currentPath = os.path.join(self._experimentPath, self._specimen)
        self._createDir(self._currentPath)
    else:
        self._currentPath = self._experimentPath
    # Now iterate over the tubes and retrieve their datasets
    dataSets = []
    for tube in tubes:
        tubeCode = tube.getCode()
        dataSetsForSample = self._getDataSetForTube(tubeCode)
        dataSets.extend(dataSetsForSample)
    if len(dataSets) == 0:
        self._message = "Could not retrieve datasets for tubes in " \
            "experiment with code " + self._experimentCode + "."
        self._logger.error(self._message)
        return False
    # Get all fcs files for the datasets
    dataSetFiles = self._getFilesForDataSets(dataSets)
    if len(dataSetFiles) == 0:
        self._message = "Could not retrieve files for datasets from tubes."
        self._logger.error(self._message)
        return False
    # Copy the files
    for fcsFile in dataSetFiles:
        self._copyFile(fcsFile, self._currentPath)
    # Return success
    return True
def _copyDataSetsForWell(self):
    """
    Copy the datasets belonging to the selected well to the experiment
    root folder.

    Returns True on success; on failure stores an error message in
    self._message (see getErrorMessage()) and returns False.
    """
    # TODO: retrieve the plate containing this well and create a plate
    # subfolder for it (currently the files land at the experiment root).
    wellDataSets = self._getDataSetForWell()
    # Resolve the FCS files attached to those datasets.
    fcsPaths = self._getFilesForDataSets(wellDataSets)
    if not fcsPaths:
        self._message = "Could not retrieve files for datasets from well."
        self._logger.error(self._message)
        return False
    # Store at the experiment level.
    self._currentPath = self._experimentPath
    for path in fcsPaths:
        self._copyFile(path, self._currentPath)
    return True
def _copyDataSetsForTube(self):
    """
    Copy the datasets belonging to the selected tube to the experiment
    root folder.

    Returns True on success; on failure stores an error message in
    self._message (see getErrorMessage()) and returns False.
    """
    tubeDataSets = self._getDataSetForTube()
    # Resolve the FCS files attached to those datasets.
    fcsPaths = self._getFilesForDataSets(tubeDataSets)
    if not fcsPaths:
        self._message = "Could not retrieve files for datasets from tube."
        self._logger.error(self._message)
        return False
    # Tubes are stored flat at the experiment level.
    self._currentPath = self._experimentPath
    for path in fcsPaths:
        self._copyFile(path, self._currentPath)
    return True
def _copyDataSetForFCSFileSample(self):
    """
    Copy the datasets belonging to the selected FCS file sample to the
    experiment root folder.

    Returns True on success; on failure stores an error message in
    self._message (see getErrorMessage()) and returns False.
    """
    sampleDataSets = self._getDataSetForFCSFileSample()
    # Resolve the FCS files attached to those datasets.
    fcsPaths = self._getFilesForDataSets(sampleDataSets)
    if not fcsPaths:
        self._message = "Could not retrieve files for datasets from FCSFile sample."
        self._logger.error(self._message)
        return False
    # Store at the experiment level.
    self._currentPath = self._experimentPath
    for path in fcsPaths:
        self._copyFile(path, self._currentPath)
    return True
def _copyFile(self, source, dstDir):
    """Copies the source file (with full path) to directory dstDir.

    We use a trick to preserve the NFSv4 ACLs: since copying the file
    loses them, we first touch the destination file to create it, and
    then we overwrite it. Increments self._numCopiedFiles on each call.
    """
    dstFile = os.path.join(dstDir, os.path.basename(source))
    # FIX: renamed the local from 'touch' to 'touch_bin' -- the old name
    # shadowed the module-level touch() helper.
    touch_bin = "/usr/bin/touch" if OSUtilities.isMacOS() else "/bin/touch"
    subprocess.call([touch_bin, dstFile])
    subprocess.call(["/bin/cp", source, dstDir])
    self._logger.info("Copying file " + source + " to " + dstDir)
    self._numCopiedFiles += 1
def _createDir(self, dirFullPath):
    """Creates the passed directory (with full path), including any
    missing intermediate directories.

    Raises OSError if a directory in the chain cannot be created or if
    the leaf directory already exists (os.makedirs semantics).
    """
    os.makedirs(dirFullPath)
def _createRootAndExperimentFolder(self):
"""
Create the experiment folder. Notice that it uses information already
stored in the object, but this info is filled in in the constructor, so
it is safe to assume it is there if nothing major went wrong. In this
case, the method will return False and no folder will be created.
Otherwise, the method returns True.
Please notice that if the experiment folder already exists, _{digit}
will be appended to the folder name, to ensure that the folder is
unique. The updated folder name will be stored in the _rootExportPath
property.
"""
# This should not happen
if self._rootExportPath == "":
return False
# Make sure that the experiment folder does not already exist
expPath = self._rootExportPath
# Does the folder already exist?
if os.path.exists(expPath):
counter = 1
ok = False
while not ok:
tmpPath = expPath + "_" + str(counter)
if not os.path.exists(tmpPath):
expPath = tmpPath
ok = True
else:
counter += 1
# Update the root and experiment paths
self._rootExportPath = expPath
self._experimentPath = os.path.join(self._rootExportPath,
self._experimentName)
# Create the root folder
self._createDir(self._rootExportPath)
# And now create the experiment folder (in the root folder)
self._createDir(self._experimentPath)
# Return success
return True
def _getDataSetsForPlate(self, plateCode=None):
    """
    Return a list of datasets belonging to the plate with specified code,
    optionally filtered by self._specimen. If none are found, return [].
    If no plateCode is given, it is assumed that the plate is the passed
    entity with code self._entityCode.
    """
    if plateCode is None:
        plateCode = self._entityCode
    # Set search criteria to retrieve all wells contained in the plate
    # (wells are samples whose container is the plate).
    searchCriteria = SearchCriteria()
    plateCriteria = SearchCriteria()
    plateCriteria.addMatchClause(MatchClause.createAttributeMatch(MatchClauseAttribute.CODE, plateCode))
    searchCriteria.addSubCriteria(SearchSubCriteria.createSampleContainerCriteria(plateCriteria))
    # NOTE(review): 'searchService' is presumably an openBIS-injected
    # global; not defined in this file.
    wells = searchService.searchForSamples(searchCriteria)
    if len(wells) == 0:
        self._message = "Could not retrieve wells for plate with " \
            "code " + plateCode + "."
        self._logger.error(self._message)
        return wells
    # Check that the specimen matches (if needed)
    if self._specimen != "":
        wells = [well for well in wells if \
            well.getPropertyValue(self._experimentPrefix + "_SPECIMEN") == self._specimen]
    # Now iterate over the samples and retrieve their datasets
    dataSets = []
    for well in wells:
        wellCode = well.getCode()
        dataSetsForWell = self._getDataSetForWell(wellCode)
        dataSets.extend(dataSetsForWell)
    if len(dataSets) == 0:
        self._message = "Could not retrieve datasets for wells in " \
            "plate with code " + plateCode + " from experiment " \
            "with code " + self._experimentCode + "."
        self._logger.error(self._message)
    # Return
    return dataSets
def _getDataSetForWell(self, wellCode=None):
    """
    Return the datasets attached to the well with the given code
    (defaulting to self._entityCode when wellCode is None). When nothing
    is found, records an error message in self._message and returns the
    empty result.
    """
    code = wellCode if wellCode is not None else self._entityCode
    # Search for datasets whose owning sample has this code.
    sampleCriteria = SearchCriteria()
    sampleCriteria.addMatchClause(
        MatchClause.createAttributeMatch(MatchClauseAttribute.CODE, code))
    criteria = SearchCriteria()
    criteria.addSubCriteria(SearchSubCriteria.createSampleCriteria(sampleCriteria))
    dataSets = searchService.searchForDataSets(criteria)
    if len(dataSets) == 0:
        self._message = "Could not retrieve datasets for well " \
            "with code " + code + "."
        self._logger.error(self._message)
    return dataSets
def _getDataSetForTube(self, tubeCode=None):
"""
Get the datasets belonging to the tube with specified tube code.
If none is found, return [].
If no tubeCode is given, it is assumed that the tube is the passed
entity with code self._entityCode.
"""
if tubeCode is None:
tubeCode = self._entityCode
# Set search criteria to retrieve the dataset contained in the tube
searchCriteria = SearchCriteria()
tubeCriteria = SearchCriteria()
tubeCriteria.addMatchClause(MatchClause.createAttributeMatch(MatchClauseAttribute.CODE, tubeCode))
searchCriteria.addSubCriteria(SearchSubCriteria.createSampleCriteria(tubeCriteria))
dataSets = searchService.searchForDataSets(searchCriteria)
if len(dataSets) == 0:
self._message = "Could not retrieve datasets for tube " \
"with code " + tubeCode + "."
self._logger.error(self._message)
# Return
return dataSets
def _getDataSetForFCSFileSample(self):
"""
Get the FCS file for the sample with type {exp_prefix}_FCSFILE.
"""
# Get the dataset for current FCS file sample
dataSets = searchService.getDataSet(self._entityId)
if dataSets is None:
self._message = "Could not retrieve datasets for " \
"FCS file with identifier " + self._entityId + "!"
self._logger.error(self._message)
else:
dataSets = [dataSets]
# Return
return dataSets
def _getFilesForDataSets(self, dataSets):
"""
Get the list of FCS file paths that correspond to the input list
of datasets. If not files are found, returns [].
"""
if len(dataSets) == 0:
return []
dataSetFiles = []
for dataSet in dataSets:
content = contentProvider.getContent(dataSet.getDataSetCode())
nodes = content.listMatchingNodes("original", ".*\.fcs")
if nodes is not None:
for node in nodes:
fileName = node.tryGetFile()
if fileName is not None:
fileName = str(fileName)
if fileName.lower().endswith(".fcs"):
dataSetFiles.append(fileName)
if len(dataSetFiles) == 0:
self._message = "Could not retrieve dataset files!"
self._logger.error(self._message)
# Return the files
return dataSetFiles
def _getAllPlates(self):
"""
Get all plates in the experiment. Returns [] if none are found.
"""
# Set search criteria to retrieve all plates in the experiment
searchCriteria = SearchCriteria()
searchCriteria.addMatchClause(MatchClause.createAttributeMatch(MatchClauseAttribute.TYPE, self._experimentPrefix + "_PLATE"))
expCriteria = SearchCriteria()
expCriteria.addMatchClause(MatchClause.createAttributeMatch(MatchClauseAttribute.PERM_ID, self._experiment.permId))
searchCriteria.addSubCriteria(SearchSubCriteria.createExperimentCriteria(expCriteria))
plates = searchService.searchForSamples(searchCriteria)
if len(plates) == 0:
self._message = "The experiment with code " + \
self._experimentCode + " does not contain plates."
self._logger.info(self._message)
return plates
# Return the plates
return plates
def _getAllTubes(self):
"""
Get all tubes in the experiment. If the specimen is set (self._specimen),
then return only those tubes that belong to it.
Returns [] if none are found.
"""
# Set search criteria to retrieve all tubes in the experiment
# All tubes belong to a virtual tubeset - so the set of tubes in the
# experiment is exactly the same as the set of tubes in the virtual
# tubeset
searchCriteria = SearchCriteria()
searchCriteria.addMatchClause(MatchClause.createAttributeMatch(MatchClauseAttribute.TYPE, self._experimentPrefix + "_TUBE"))
expCriteria = SearchCriteria()
expCriteria.addMatchClause(MatchClause.createAttributeMatch(MatchClauseAttribute.PERM_ID, self._experiment.permId))
searchCriteria.addSubCriteria(SearchSubCriteria.createExperimentCriteria(expCriteria))
tubes = searchService.searchForSamples(searchCriteria)
if len(tubes) == 0:
self._message = "The experiment with code " + \
self._experimentCode + "does not contain tubes."
self._logger.error(self._message)
return tubes
# Check that the specimen matches (if needed)
if self._specimen != "":
tubes = [tube for tube in tubes if \
tube.getPropertyValue(self._experimentPrefix + "_SPECIMEN") == self._specimen]
# Return the (filtered) tubes
return tubes
# Parse properties file for custom settings
def parsePropertiesFile():
    """Parse the plug-in properties file for custom settings.

    Reads 'key = value' lines from plugin.properties, keeping only the
    expected keys ('base_dir', 'export_dir').

    @return dictionary with all expected keys on success; None when the
            file cannot be opened or any expected key is missing.
            (Previously an unreadable file returned {}, which bypassed
            the caller's `is None` failure check.)
    """
    filename = "../core-plugins/flow/2/dss/reporting-plugins/export_flow_datasets/plugin.properties"
    var_names = ['base_dir', 'export_dir']
    properties = {}
    try:
        fp = open(filename, "r")
    except IOError:
        # Missing/unreadable file: signal failure explicitly instead of
        # silently returning an empty dict.
        return None
    try:
        for line in fp:
            # Strip spaces, quotes and newlines before splitting on '='
            line = re.sub('[ \'\"\n]', '', line)
            parts = line.split("=")
            if len(parts) == 2 and parts[0] in var_names:
                properties[parts[0]] = parts[1]
    finally:
        fp.close()
    # Check that all expected variables were found
    for var_name in var_names:
        if var_name not in properties:
            return None
    # Make sure that there are no Windows line endings
    for var_name in var_names:
        properties[var_name] = properties[var_name].replace('\r', '')
    # Everything found
    return properties
# Plug-in entry point
#
# Input parameters:
#
# uid : job unique identifier (see below)
# experimentId : experiment identifier
# experimentType: experiment type
# entityType : entity type
# entityId : entity ID
# specimen : name of the specimen
# mode           : requested mode of operation: one of 'normal', 'hrm', 'zip'.
#
# This method returns a table to the client with a different set of columns
# depending on whether the plug-in is called for the first time and the process
# is just started, or if it is queried for completeness at a later time.
#
# At the end of the first call, a table with following columns is returned:
#
# uid : unique identifier of the running plug-in
# completed: indicated if the plug-in has finished. This is set to False in the
# first call.
#
# Later calls return a table with the following columns:
#
# uid : unique identifier of the running plug-in. This was returned to
# the client in the first call and was passed on again as a parameter.
# Here it is returned again to make sure that client and server
# always know which task they are talking about.
# completed: True if the process has completed in the meanwhile, False if it
# is still running.
# success : True if the process completed successfully, False otherwise.
# message : error message in case success was False.
# nCopiedFiles: total number of copied files.
# relativeExpFolder: folder to the copied folder relative to the root of the
# export folder.
# zipArchiveFileName: file name of the zip in case compression was requested.
# mode : requested mode of operation.
def aggregate(parameters, tableBuilder):
    """Plug-in entry point (the protocol is described in the comment
    block right above this function).

    First call (no 'uid' in parameters): start aggregateProcess() in a
    background thread and return a minimal {uid, completed=False} table.
    Later calls (with 'uid'): look up the cached state of that run in
    LRCache and report it back to the client.
    """
    uid = parameters.get("uid")

    if uid is None or uid == "":
        # First call: mint a unique identifier for this run
        uid = str(uuid.uuid4())
        tableBuilder.addHeader("uid")
        tableBuilder.addHeader("completed")
        row = tableBuilder.addRow()
        row.setCell("uid", uid)
        row.setCell("completed", False)
        # Launch the actual process in a separate thread and return
        # immediately
        worker = Thread(target=aggregateProcess,
                        args=(parameters, tableBuilder, uid))
        worker.start()
        return

    # The process is already running in a separate thread. We fetch its
    # current state and return it.
    resultToSend = LRCache.get(uid)
    if resultToSend is None:
        # This should not happen
        raise Exception("Could not retrieve results from result cache!")

    columns = ["uid", "completed", "success", "message", "nCopiedFiles",
               "relativeExpFolder", "zipArchiveFileName", "mode"]
    for column in columns:
        tableBuilder.addHeader(column)
    row = tableBuilder.addRow()
    for column in columns:
        row.setCell(column, resultToSend[column])
# Actual work process
def aggregateProcess(parameters, tableBuilder, uid):
    """Background worker behind the aggregate() entry point.

    Exports (and optionally zips) the flow datasets described by
    `parameters` via a Mover object, keeps the client-visible state in
    LRCache under `uid`, and emails the outcome to the user.

    @param parameters: map of input parameters (see the comment block
                       above aggregate() for the expected keys).
    @param tableBuilder: not used here; kept for signature compatibility
                         with the caller.
    @param uid: unique identifier of this run, used as the LRCache key.
    """
    # Make sure to initialize and store the results. We need to have them since
    # most likely the client will try to retrieve them again before the process
    # is finished.
    resultToStore = {}
    resultToStore["uid"] = uid
    resultToStore["success"] = True
    resultToStore["completed"] = False
    resultToStore["message"] = ""
    resultToStore["nCopiedFiles"] = ""
    resultToStore["relativeExpFolder"] = ""
    resultToStore["zipArchiveFileName"] = ""
    resultToStore["mode"] = ""
    LRCache.set(uid, resultToStore)
    # Get path to containing folder
    # __file__ does not work (reliably) in Jython
    dbPath = "../core-plugins/flow/2/dss/reporting-plugins/export_flow_datasets"
    # Path to the logs subfolder
    logPath = os.path.join(dbPath, "logs")
    # Make sure the logs subfolder exists
    if not os.path.exists(logPath):
        os.makedirs(logPath)
    # Path for the log file
    logFile = os.path.join(logPath, "log.txt")
    # Set up logging
    logging.basicConfig(filename=logFile, level=logging.DEBUG,
                        format='%(asctime)-15s %(levelname)s: %(message)s')
    logger = logging.getLogger()
    # Get parameters from plugin.properties
    properties = parsePropertiesFile()
    if properties is None:
        raise Exception("Could not process plugin.properties")
    # Extract the input parameters
    experimentId = parameters.get("experimentId")
    experimentType = parameters.get("experimentType")
    entityType = parameters.get("entityType")
    entityId = parameters.get("entityId")
    specimen = parameters.get("specimen")
    mode = parameters.get("mode")
    # Log the call parameters
    logger.info("Aggregation plug-in called with following parameters:")
    logger.info("experimentId = " + experimentId)
    logger.info("experimentType = " + experimentType)
    logger.info("entityType = " + entityType)
    logger.info("entityId = " + entityId)
    logger.info("specimen = " + specimen)
    logger.info("mode = " + mode)
    logger.info("userId = " + userId)
    logger.info("Aggregation plugin properties:")
    logger.info("properties = " + str(properties))
    # Instantiate the Mover object - userId is a global variable
    # made available to the aggregation plug-in
    mover = Mover(experimentId, experimentType, entityType, entityId, specimen,
                  mode, userId, properties, logger)
    # Process
    success = mover.process()
    # Compress
    if mode == "zip":
        mover.compressIfNeeded()
    # Get some results info
    nCopiedFiles = mover.getNumberOfCopiedFiles()
    errorMessage = mover.getErrorMessage()
    relativeExpFolder = mover.getRelativeRootExperimentPath()
    zipFileName = mover.getZipArchiveFileName()
    # Update results and store them
    resultToStore["uid"] = uid
    resultToStore["completed"] = True
    resultToStore["success"] = success
    resultToStore["message"] = errorMessage
    resultToStore["nCopiedFiles"] = nCopiedFiles
    resultToStore["relativeExpFolder"] = relativeExpFolder
    resultToStore["zipArchiveFileName"] = zipFileName
    resultToStore["mode"] = mode
    LRCache.set(uid, resultToStore)
    # Email result to the user
    if success:  # idiomatic truth test (was `success == True`)
        subject = "Flow export: successfully processed requested data"
        if nCopiedFiles == 1:
            snip = "One file was "
        else:
            snip = str(nCopiedFiles) + " files were "
        if mode == "normal":
            body = snip + "successfully exported to {...}/" + relativeExpFolder + "."
        else:
            body = snip + "successfully packaged for download: " + zipFileName
    else:
        subject = "Flow export: error processing request!"
        body = "Sorry, there was an error processing your request. " + \
               "Please send your administrator the following report:\n\n" + \
               "\"" + errorMessage + "\"\n"
    # Send
    try:
        mailService.createEmailSender().withSubject(subject).withBody(body).send()
    except:
        # Deliberately broad: under Jython a bare except also catches Java
        # exceptions raised by the mail service; a mail failure must never
        # kill the worker thread.
        sys.stderr.write("export_flow_datasets: Failure sending email to user!")
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cond_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import cond_v2
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
from tensorflow.python.util import compat
class CondV2Test(test.TestCase):
  def _testCond(self, true_fn, false_fn, train_vals, feed_dict=None):
    """Asserts cond_v2 agrees with control_flow_ops.cond.

    Builds a v1 cond and a cond_v2 from the same branch functions, then
    compares their outputs and their gradients w.r.t. `train_vals` for
    both pred=True and pred=False.
    """
    if not feed_dict:
      feed_dict = {}
    with self.session(graph=ops.get_default_graph()) as sess:
      pred = array_ops.placeholder(dtypes.bool, name="pred")
      expected = control_flow_ops.cond(pred, true_fn, false_fn, name="expected")
      actual = cond_v2.cond_v2(pred, true_fn, false_fn, name="actual")
      expected_grad = gradients_impl.gradients(expected, train_vals)
      actual_grad = gradients_impl.gradients(actual, train_vals)
      # Check the True branch.
      sess_run_args = {pred: True}
      sess_run_args.update(feed_dict)
      expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run(
          (expected, actual, expected_grad, actual_grad), sess_run_args)
      self.assertEqual(expected_val, actual_val)
      self.assertEqual(expected_grad_val, actual_grad_val)
      # Check the False branch.
      sess_run_args = {pred: False}
      sess_run_args.update(feed_dict)
      expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run(
          (expected, actual, expected_grad, actual_grad), sess_run_args)
      self.assertEqual(expected_val, actual_val)
      self.assertEqual(expected_grad_val, actual_grad_val)
  @test_util.run_deprecated_v1
  def testBasic(self):
    """Single-output branches, each capturing a different constant."""
    x = constant_op.constant(1.0, name="x")
    y = constant_op.constant(2.0, name="y")
    def true_fn():
      return x * 2.0
    def false_fn():
      return y * 3.0
    self._testCond(true_fn, false_fn, [x])
    self._testCond(true_fn, false_fn, [x, y])
    self._testCond(true_fn, false_fn, [y])
  def testExternalControlDependencies(self):
    """Control deps on ops from outside the branch must still execute."""
    with ops.Graph().as_default(), self.test_session():
      v = variables.Variable(1.0)
      v.initializer.run()
      op = v.assign_add(1.0)
      def true_branch():
        with ops.control_dependencies([op]):
          return 1.0
      cond_v2.cond_v2(array_ops.placeholder_with_default(False, None),
                      true_branch,
                      lambda: 2.0).eval()
      # The assign_add ran even though the False branch was taken.
      self.assertAllEqual(self.evaluate(v), 2.0)
  @test_util.run_deprecated_v1
  def testMultipleOutputs(self):
    """Branches that each return a tuple of two tensors."""
    x = constant_op.constant(1.0, name="x")
    y = constant_op.constant(3.0, name="y")
    def true_fn():
      return x * y, y
    def false_fn():
      return x, y * 3.0
    self._testCond(true_fn, false_fn, [x])
    self._testCond(true_fn, false_fn, [x, y])
    self._testCond(true_fn, false_fn, [y])
  @test_util.run_deprecated_v1
  def testBasic2(self):
    """One branch uses both captured constants, the other captures none."""
    x = constant_op.constant(1.0, name="x")
    y = constant_op.constant(2.0, name="y")
    def true_fn():
      return x * y * 2.0
    def false_fn():
      return 2.0
    self._testCond(true_fn, false_fn, [x])
    self._testCond(true_fn, false_fn, [x, y])
    self._testCond(true_fn, false_fn, [y])
  @test_util.run_deprecated_v1
  def testNoInputs(self):
    """Branches that capture no external tensors at all."""
    with self.cached_session() as sess:
      pred = array_ops.placeholder(dtypes.bool, name="pred")
      def true_fn():
        return constant_op.constant(1.0)
      def false_fn():
        return constant_op.constant(2.0)
      out = cond_v2.cond_v2(pred, true_fn, false_fn)
      self.assertEqual(sess.run(out, {pred: True}), (1.0,))
      self.assertEqual(sess.run(out, {pred: False}), (2.0,))
  def _createCond(self, name):
    """Creates a cond_v2 call and returns the output tensor and the cond op."""
    pred = constant_op.constant(True, name="pred")
    x = constant_op.constant(1.0, name="x")
    def true_fn():
      return x
    def false_fn():
      return x + 1
    output = cond_v2.cond_v2(pred, true_fn, false_fn, name=name)
    # Walk back from the returned tensor to the underlying functional
    # "If" op (the returned tensor is not produced by the If op directly).
    cond_op = output.op.inputs[0].op
    self.assertEqual(cond_op.type, "If")
    return output, cond_op
  def _createNestedCond(self, name):
    """Like _createCond but creates a nested cond_v2 call as well."""
    pred = constant_op.constant(True, name="pred")
    x = constant_op.constant(1.0, name="x")
    def true_fn():
      # Inner cond_v2 nested inside the outer True branch.
      return cond_v2.cond_v2(pred, lambda: x, lambda: x + 1)
    def false_fn():
      return x + 2
    output = cond_v2.cond_v2(pred, true_fn, false_fn, name=name)
    # Walk back from the returned tensor to the outer functional "If" op.
    cond_op = output.op.inputs[0].op
    self.assertEqual(cond_op.type, "If")
    return output, cond_op
  def testDefaultName(self):
    """Checks default op and branch-function names, with and without scopes."""
    with ops.Graph().as_default():
      _, cond_op = self._createCond(None)
      self.assertEqual(cond_op.name, "cond")
      self.assertRegexpMatches(
          cond_op.get_attr("then_branch").name, r"cond_true_\d*")
      self.assertRegexpMatches(
          cond_op.get_attr("else_branch").name, r"cond_false_\d*")
    with ops.Graph().as_default():
      with ops.name_scope("foo"):
        _, cond1_op = self._createCond("")
        self.assertEqual(cond1_op.name, "foo/cond")
        self.assertRegexpMatches(
            cond1_op.get_attr("then_branch").name, r"foo_cond_true_\d*")
        self.assertRegexpMatches(
            cond1_op.get_attr("else_branch").name, r"foo_cond_false_\d*")
        # A second cond in the same scope gets a uniquified name.
        _, cond2_op = self._createCond(None)
        self.assertEqual(cond2_op.name, "foo/cond_1")
        self.assertRegexpMatches(
            cond2_op.get_attr("then_branch").name, r"foo_cond_1_true_\d*")
        self.assertRegexpMatches(
            cond2_op.get_attr("else_branch").name, r"foo_cond_1_false_\d*")
  @test_util.run_v1_only("b/120545219")
  def testDefunInCond(self):
    """A defun-wrapped computation inside one branch."""
    x = constant_op.constant(1.0, name="x")
    y = constant_op.constant(2.0, name="y")
    def true_fn():
      @function.defun
      def fn():
        return x * y * 2.0
      return fn()
    def false_fn():
      return 2.0
    self._testCond(true_fn, false_fn, [x])
    self._testCond(true_fn, false_fn, [x, y])
    self._testCond(true_fn, false_fn, [y])
  @test_util.run_deprecated_v1
  def testNestedDefunInCond(self):
    """A defun nested inside another defun, inside the False branch."""
    x = constant_op.constant(1.0, name="x")
    y = constant_op.constant(2.0, name="y")
    def true_fn():
      return 2.0
    def false_fn():
      @function.defun
      def fn():
        @function.defun
        def nested_fn():
          return x * y * 2.0
        return nested_fn()
      return fn()
    self._testCond(true_fn, false_fn, [x])
    self._testCond(true_fn, false_fn, [x, y])
    self._testCond(true_fn, false_fn, [y])
  @test_util.run_deprecated_v1
  def testDoubleNestedDefunInCond(self):
    """Three levels of defun nesting inside the True branch."""
    x = constant_op.constant(1.0, name="x")
    y = constant_op.constant(2.0, name="y")
    def true_fn():
      @function.defun
      def fn():
        @function.defun
        def nested_fn():
          @function.defun
          def nested_nested_fn():
            return x * y * 2.0
          return nested_nested_fn()
        return nested_fn()
      return fn()
    def false_fn():
      return 2.0
    self._testCond(true_fn, false_fn, [x])
    self._testCond(true_fn, false_fn, [x, y])
    self._testCond(true_fn, false_fn, [y])
  def testNestedCond(self):
    """A cond nested inside the False branch (via the _cond helper)."""
    def run_test(pred_value):
      def build_graph():
        pred = array_ops.placeholder(dtypes.bool, name="pred")
        x = constant_op.constant(1.0, name="x")
        y = constant_op.constant(2.0, name="y")
        def true_fn():
          return 2.0
        def false_fn():
          def false_true_fn():
            return x * y * 2.0
          def false_false_fn():
            return x * 5.0
          return _cond(pred, false_true_fn, false_false_fn, "inside_false_fn")
        return x, y, pred, true_fn, false_fn
      with ops.Graph().as_default():
        x, y, pred, true_fn, false_fn = build_graph()
        self._testCond(true_fn, false_fn, [x, y], {pred: pred_value})
        self._testCond(true_fn, false_fn, [x], {pred: pred_value})
        self._testCond(true_fn, false_fn, [y], {pred: pred_value})
    run_test(True)
    run_test(False)
  def testNestedCondBothBranches(self):
    """A nested cond inside each of the two outer branches."""
    def run_test(pred_value):
      def build_graph():
        pred = array_ops.placeholder(dtypes.bool, name="pred")
        x = constant_op.constant(1.0, name="x")
        y = constant_op.constant(2.0, name="y")
        def true_fn():
          return _cond(pred, lambda: x + y, lambda: x * x, name=None)
        def false_fn():
          return _cond(pred, lambda: x - y, lambda: y * y, name=None)
        return x, y, pred, true_fn, false_fn
      with ops.Graph().as_default():
        x, y, pred, true_fn, false_fn = build_graph()
        self._testCond(true_fn, false_fn, [x, y], {pred: pred_value})
        self._testCond(true_fn, false_fn, [x], {pred: pred_value})
        self._testCond(true_fn, false_fn, [y], {pred: pred_value})
    run_test(True)
    run_test(False)
  def testDoubleNestedCond(self):
    """Two levels of cond nesting, exercised for all predicate pairs."""
    def run_test(pred1_value, pred2_value):
      def build_graph():
        pred1 = array_ops.placeholder(dtypes.bool, name="pred1")
        pred2 = array_ops.placeholder(dtypes.bool, name="pred2")
        x = constant_op.constant(1.0, name="x")
        y = constant_op.constant(2.0, name="y")
        def true_fn():
          return 2.0
        def false_fn():
          def false_true_fn():
            def false_true_true_fn():
              return x * y * 2.0
            def false_true_false_fn():
              return x * 10.0
            return _cond(
                pred1,
                false_true_true_fn,
                false_true_false_fn,
                name="inside_false_true_fn")
          def false_false_fn():
            return x * 5.0
          return _cond(
              pred2, false_true_fn, false_false_fn, name="inside_false_fn")
        return x, y, pred1, pred2, true_fn, false_fn
      with ops.Graph().as_default():
        # A fresh graph is built for each gradient-target combination.
        x, y, pred1, pred2, true_fn, false_fn = build_graph()
        self._testCond(true_fn, false_fn, [x, y], {
            pred1: pred1_value,
            pred2: pred2_value
        })
        x, y, pred1, pred2, true_fn, false_fn = build_graph()
        self._testCond(true_fn, false_fn, [x], {
            pred1: pred1_value,
            pred2: pred2_value
        })
        x, y, pred1, pred2, true_fn, false_fn = build_graph()
        self._testCond(true_fn, false_fn, [y], {
            pred1: pred1_value,
            pred2: pred2_value
        })
    run_test(True, True)
    run_test(True, False)
    run_test(False, False)
    run_test(False, True)
  def testGradientFromInsideDefun(self):
    """Gradients of a nested cond computed inside a defun."""
    def build_graph():
      pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
      pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
      x = constant_op.constant(1.0, name="x")
      y = constant_op.constant(2.0, name="y")
      def true_fn():
        return 2.0
      def false_fn():
        def inner_true_fn():
          return x * y * 2.0
        def inner_false_fn():
          return x * 5.0
        return cond_v2.cond_v2(
            pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")
      cond_outer = cond_v2.cond_v2(
          pred_outer, true_fn, false_fn, name="outer_cond")
      # Compute grads inside a Defun.
      @function.defun
      def nesting_fn():
        return gradients_impl.gradients(cond_outer, [x, y])
      grads = nesting_fn()
      return grads, pred_outer, pred_inner
    with ops.Graph().as_default():
      grads, pred_outer, pred_inner = build_graph()
      with self.session(graph=ops.get_default_graph()) as sess:
        # Expected values follow from the branch taken for each pred pair.
        self.assertSequenceEqual(
            sess.run(grads, {
                pred_outer: True,
                pred_inner: True
            }), [0., 0.])
        self.assertSequenceEqual(
            sess.run(grads, {
                pred_outer: True,
                pred_inner: False
            }), [0., 0.])
        self.assertSequenceEqual(
            sess.run(grads, {
                pred_outer: False,
                pred_inner: True
            }), [4., 2.])
        self.assertSequenceEqual(
            sess.run(grads, {
                pred_outer: False,
                pred_inner: False
            }), [5., 0.])
  def testGradientFromInsideNestedDefun(self):
    """Gradients of a nested cond computed inside two nested defuns."""
    def build_graph():
      pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
      pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
      x = constant_op.constant(1.0, name="x")
      y = constant_op.constant(2.0, name="y")
      def true_fn():
        return 2.0
      def false_fn():
        def inner_true_fn():
          return x * y * 2.0
        def inner_false_fn():
          return x * 5.0
        return cond_v2.cond_v2(
            pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")
      cond_outer = cond_v2.cond_v2(
          pred_outer, true_fn, false_fn, name="outer_cond")
      # Compute grads inside a Defun.
      @function.defun
      def nesting_fn():
        @function.defun
        def inner_nesting_fn():
          return gradients_impl.gradients(cond_outer, [x, y])
        return inner_nesting_fn()
      grads = nesting_fn()
      return grads, pred_outer, pred_inner
    with ops.Graph().as_default():
      grads, pred_outer, pred_inner = build_graph()
      with self.session(graph=ops.get_default_graph()) as sess:
        # Expected values follow from the branch taken for each pred pair.
        self.assertSequenceEqual(
            sess.run(grads, {
                pred_outer: True,
                pred_inner: True
            }), [0., 0.])
        self.assertSequenceEqual(
            sess.run(grads, {
                pred_outer: True,
                pred_inner: False
            }), [0., 0.])
        self.assertSequenceEqual(
            sess.run(grads, {
                pred_outer: False,
                pred_inner: True
            }), [4., 2.])
        self.assertSequenceEqual(
            sess.run(grads, {
                pred_outer: False,
                pred_inner: False
            }), [5., 0.])
  def testBuildCondAndGradientInsideDefun(self):
    """Both the nested cond and its gradient are built inside a defun."""
    def build_graph():
      pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
      pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
      x = constant_op.constant(1.0, name="x")
      y = constant_op.constant(2.0, name="y")
      # Build cond and its gradient inside a Defun.
      @function.defun
      def fn():
        def true_fn():
          return 2.0
        def false_fn():
          def inner_true_fn():
            return x * y * 2.0
          def inner_false_fn():
            return x * 5.0
          return cond_v2.cond_v2(
              pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")
        cond_outer = cond_v2.cond_v2(
            pred_outer, true_fn, false_fn, name="outer_cond")
        return gradients_impl.gradients(cond_outer, [x, y])
      grads = fn()
      return grads, pred_outer, pred_inner
    with ops.Graph().as_default(), self.session(
        graph=ops.get_default_graph()) as sess:
      grads, pred_outer, pred_inner = build_graph()
      # Expected values follow from the branch taken for each pred pair.
      self.assertSequenceEqual(
          sess.run(grads, {
              pred_outer: True,
              pred_inner: True
          }), [0., 0.])
      self.assertSequenceEqual(
          sess.run(grads, {
              pred_outer: True,
              pred_inner: False
          }), [0., 0.])
      self.assertSequenceEqual(
          sess.run(grads, {
              pred_outer: False,
              pred_inner: True
          }), [4., 2.])
      self.assertSequenceEqual(
          sess.run(grads, {
              pred_outer: False,
              pred_inner: False
          }), [5., 0.])
  @test_util.run_deprecated_v1
  def testSecondDerivative(self):
    """First and second derivatives through a cond_v2."""
    with self.cached_session() as sess:
      pred = array_ops.placeholder(dtypes.bool, name="pred")
      x = constant_op.constant(3.0, name="x")
      def true_fn():
        return math_ops.pow(x, 3)
      def false_fn():
        return x
      cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond")
      cond_grad = gradients_impl.gradients(cond, [x])
      cond_grad_grad = gradients_impl.gradients(cond_grad, [x])
      # d[x^3]/dx = 3x^2
      true_val = sess.run(cond_grad, {pred: True})
      self.assertEqual(true_val, [27.0])
      # d[x]/dx = 1
      false_val = sess.run(cond_grad, {pred: False})
      self.assertEqual(false_val, [1.0])
      true_val = sess.run(cond_grad_grad, {pred: True})
      # d2[x^3]/dx2 = 6x
      self.assertEqual(true_val, [18.0])
      false_val = sess.run(cond_grad_grad, {pred: False})
      # d2[x]/dx2 = 0
      self.assertEqual(false_val, [0.0])
  def testGradientOfDeserializedCond(self):
    """Gradients still work after a cond round-trips through a MetaGraph."""
    with ops.Graph().as_default():
      pred = array_ops.placeholder(dtypes.bool, name="pred")
      x = constant_op.constant(3.0, name="x")
      ops.add_to_collection("x", x)
      def true_fn():
        return math_ops.pow(x, 3)
      def false_fn():
        return x
      ops.add_to_collection("pred", pred)
      cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond")
      ops.add_to_collection("cond", cond)
      meta_graph = saver.export_meta_graph()
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as sess:
        # Rebuild the graph from the serialized MetaGraph and re-derive
        # gradients from the deserialized tensors.
        saver.import_meta_graph(meta_graph)
        x = ops.get_collection("x")[0]
        pred = ops.get_collection("pred")[0]
        cond = ops.get_collection("cond")
        cond_grad = gradients_impl.gradients(cond, [x], name="cond_grad")
        cond_grad_grad = gradients_impl.gradients(
            cond_grad, [x], name="cond_grad_grad")
        # d[x^3]/dx = 3x^2
        true_val = sess.run(cond_grad, {pred: True})
        self.assertEqual(true_val, [27.0])
        # d[x]/dx = 1
        false_val = sess.run(cond_grad, {pred: False})
        self.assertEqual(false_val, [1.0])
        true_val = sess.run(cond_grad_grad, {pred: True})
        # d2[x^3]/dx2 = 6x
        self.assertEqual(true_val, [18.0])
        false_val = sess.run(cond_grad_grad, {pred: False})
        # d2[x]/dx2 = 0
        self.assertEqual(false_val, [0.0])
  def testGradientTapeOfCondWithResourceVariableInFunction(self):
    """GradientTape through a cond over a resource variable, in a tf.function."""
    with context.eager_mode():
      v = variables.Variable(2.)
      @def_function.function
      def fnWithCond():  # pylint: disable=invalid-name
        with backprop.GradientTape() as tape:
          pred = constant_op.constant(True, dtype=dtypes.bool)
          def true_fn():
            return math_ops.pow(v, 3)
          def false_fn():
            return v
          cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond")
        return tape.gradient(cond, v)
      # d[v^3]/dv at v=2 is 3*4 = 12.
      self.assertAllEqual(fnWithCond(), 12.0)
  def testLowering(self):
    """By default the If op is lowered to Switch/Merge before execution."""
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as sess:
        cond_output, _ = self._createCond("cond")
        run_options = config_pb2.RunOptions(output_partition_graphs=True)
        run_metadata = config_pb2.RunMetadata()
        sess.run(cond_output, options=run_options, run_metadata=run_metadata)
        # If lowering was enabled, there should be a `Switch` node
        switch_found = any(
            any(node.op == "Switch" for node in graph.node)
            for graph in run_metadata.partition_graphs
        )
        self.assertTrue(switch_found,
                        "A `Switch` op should exist if the graph was lowered.")
        # If lowering was enabled, there should be no `If` node
        if_found = any(
            any(node.op == "If" for node in graph.node)
            for graph in run_metadata.partition_graphs
        )
        self.assertFalse(if_found,
                         "An `If` op was found, but it should be lowered.")
  @test_util.run_deprecated_v1
  def testLoweringDisabledInXLA(self):
    """Inside an XLA context the If op must NOT be lowered."""
    with self.session(graph=ops.Graph()) as sess:
      # Build the cond_v2 in an XLA context
      xla_context = control_flow_ops.XLAControlFlowContext()
      xla_context.Enter()
      cond_output, cond_op = self._createCond("cond")
      xla_context.Exit()
      # Check lowering attr is not set.
      with self.assertRaises(ValueError):
        cond_op.get_attr("_lower_using_switch_merge")
      # Check the actual graph that is run.
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      run_metadata = config_pb2.RunMetadata()
      sess.run(cond_output, options=run_options, run_metadata=run_metadata)
      # Lowering disabled in XLA, there should be no `Switch` node
      switch_found = any(
          any(node.op == "Switch" for node in graph.node)
          for graph in run_metadata.partition_graphs
      )
      self.assertFalse(
          switch_found,
          "A `Switch` op exists, but the graph should not be lowered.")
      # Lowering disabled in XLA, there should still be an `If` node
      if_found = any(
          any(node.op == "If" for node in graph.node)
          for graph in run_metadata.partition_graphs
      )
      self.assertTrue(
          if_found,
          "An `If` op was not found, but the graph should not be lowered.")
  @test_util.run_deprecated_v1
  def testNestedLoweringDisabledInXLA(self):
    """Lowering stays disabled for the inner If of a nested cond in XLA."""
    # Build the cond_v2 in an XLA context
    xla_context = control_flow_ops.XLAControlFlowContext()
    xla_context.Enter()
    _, cond_op = self._createNestedCond("cond")
    xla_context.Exit()
    # Check lowering attr is not set for either If node.
    with self.assertRaises(ValueError):
      cond_op.get_attr("_lower_using_switch_merge")
    # The inner If lives inside the outer branch's function graph.
    nested_if_ops = []
    for func in ops.get_default_graph()._functions.values():
      nested_if_ops.extend(op for op in func._graph.get_operations()
                           if op.type == "If")
    self.assertEqual(len(nested_if_ops), 1)
    with self.assertRaises(ValueError):
      nested_if_ops[0].get_attr("_lower_using_switch_merge")
    # TODO(skyewm): check the actual graphs that are run once we have a way to
    # programmatically access those graphs.
  @test_util.run_deprecated_v1
  def testLoweringDisabledWithSingleThreadedExecutorContext(self):
    """The single-threaded executor disables lowering (no v1 cond ops)."""
    with self.session(graph=ops.Graph()) as sess:
      @function.defun
      def _add_cond(x):
        return cond_v2.cond_v2(
            constant_op.constant(True, name="pred"),
            lambda: x,
            lambda: x + 1)
      x = array_ops.placeholder(shape=None, dtype=dtypes.float32)
      with context.function_executor_type("SINGLE_THREADED_EXECUTOR"):
        out_cond = _add_cond(x)
      # The fact that sess.run() succeeds means lowering is disabled, because
      # the single threaded executor does not support cond v1 ops.
      sess.run(out_cond, feed_dict={x: 1.0})
  @test_util.enable_control_flow_v2
  def testStructuredOutputs(self):
    """Nested (tuple-in-tuple) branch outputs keep their structure."""
    x = constant_op.constant(1.0, name="x")
    y = constant_op.constant(3.0, name="y")
    def true_fn():
      return ((x * y,), y)
    def false_fn():
      return ((x,), y * 3.0)
    output = control_flow_ops.cond(
        constant_op.constant(False), true_fn, false_fn)
    self.assertEqual(self.evaluate(output[0][0]), 1.)
    self.assertEqual(self.evaluate(output[1]), 9.)
@test_util.enable_control_flow_v2
@test_util.run_deprecated_v1
def testRaisesOutputStructuresMismatch(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(3.0, name="y")
def true_fn():
return x * y, y
def false_fn():
return ((x,), y * 3.0)
with self.assertRaisesRegexp(
TypeError, "true_fn and false_fn arguments to tf.cond must have the "
"same number, type, and overall structure of return values."):
control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)
  @test_util.enable_control_flow_v2
  def testCondAndTensorArray(self):
    """cond branches may write a TensorArray threaded through a while_loop."""
    x = math_ops.range(-5, 5)
    output = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=x.shape[0])

    def loop_body(i, output):

      def if_true():
        # Square positive elements.
        return output.write(i, x[i]**2)

      def if_false():
        # Pass non-positive elements through unchanged.
        return output.write(i, x[i])

      output = control_flow_ops.cond(x[i] > 0, if_true, if_false)
      return i + 1, output

    _, output = control_flow_ops.while_loop(
        lambda i, arr: i < x.shape[0],
        loop_body,
        loop_vars=(constant_op.constant(0), output))
    output_t = output.stack()
    # -5..0 are unchanged; 1..4 are squared.
    self.assertAllEqual(
        self.evaluate(output_t), [-5, -4, -3, -2, -1, 0, 1, 4, 9, 16])
  @test_util.enable_control_flow_v2
  def testCondAndTensorArrayInDefun(self):
    """Same as testCondAndTensorArray, but inside a defun'd function."""

    @function.defun
    def f():
      x = math_ops.range(-5, 5)
      output = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=x.shape[0])

      def loop_body(i, output):

        def if_true():
          # Square positive elements.
          return output.write(i, x[i]**2)

        def if_false():
          # Pass non-positive elements through unchanged.
          return output.write(i, x[i])

        output = control_flow_ops.cond(x[i] > 0, if_true, if_false)
        return i + 1, output

      _, output = control_flow_ops.while_loop(
          lambda i, arr: i < x.shape[0],
          loop_body,
          loop_vars=(constant_op.constant(0), output))
      return output.stack()

    output_t = f()
    self.assertAllEqual(
        self.evaluate(output_t), [-5, -4, -3, -2, -1, 0, 1, 4, 9, 16])
  @test_util.run_deprecated_v1
  def testForwardPassRewrite(self):
    """Gradient computation rewrites the If op to expose intermediates once."""
    x = constant_op.constant(1.0, name="x")
    output = cond_v2.cond_v2(constant_op.constant(True),
                             lambda: x * 2.0,
                             lambda: x)
    # The cond output is fed through an identity-like op; its producer is the
    # underlying `If` op.
    if_op = output.op.inputs[0].op
    self.assertEqual(if_op.type, "If")
    # pylint: disable=g-deprecated-assert
    self.assertEqual(len(if_op.outputs), 1)

    gradients_impl.gradients(output, x)
    # if_op should have been rewritten to output 2.0 intermediate.
    self.assertEqual(len(if_op.outputs), 2)

    gradients_impl.gradients(output, x)
    # Computing the gradient again shouldn't rewrite if_op again.
    self.assertEqual(len(if_op.outputs), 2)
    # pylint: enable=g-deprecated-assert
class CondV2CollectionTest(test.TestCase):
  """Tests that graph collections are readable and writable inside cond_v2."""

  def testCollectionIntValueAccessInCond(self):
    """Read values from graph collections inside of cond_v2."""
    with ops.Graph().as_default() as g:
      with self.session(graph=g):
        x = 2
        y = 5
        ops.add_to_collection("x", x)
        ops.add_to_collection("y", y)

        def fn():
          # Python ints stored in collections are visible to branch
          # functions and can be lifted into constants.
          x_const = constant_op.constant(ops.get_collection("x")[0])
          y_const = constant_op.constant(ops.get_collection("y")[0])
          return math_ops.add(x_const, y_const)

        cnd = cond_v2.cond_v2(constant_op.constant(True), fn, fn)
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(cnd.eval(), 7)

  def testCollectionTensorValueAccessInCond(self):
    """Read tensors from collections inside of cond_v2 & use them."""
    with ops.Graph().as_default() as g:
      with self.session(graph=g):
        x = constant_op.constant(2)
        y = constant_op.constant(5)
        ops.add_to_collection("x", x)
        ops.add_to_collection("y", y)

        def fn():
          # Outer-graph tensors fetched from collections are captured by
          # the branch function.
          x_read = ops.get_collection("x")[0]
          y_read = ops.get_collection("y")[0]
          return math_ops.add(x_read, y_read)

        cnd = cond_v2.cond_v2(math_ops.less(x, y), fn, fn)
        self.assertEqual(cnd.eval(), 7)

  def testCollectionIntValueWriteInCond(self):
    """Make sure Int writes to collections work inside of cond_v2."""
    with ops.Graph().as_default() as g:
      with self.session(graph=g):
        x = constant_op.constant(2)
        y = constant_op.constant(5)

        def true_fn():
          z = math_ops.add(x, y)
          ops.add_to_collection("z", 7)
          # `mul` was renamed to `multiply` in the TF 1.x Python API.
          return math_ops.multiply(x, z)

        def false_fn():
          z = math_ops.add(x, y)
          return math_ops.multiply(x, z)

        cnd = cond_v2.cond_v2(constant_op.constant(True), true_fn, false_fn)
        self.assertEqual(cnd.eval(), 14)

        # The write performed inside the taken branch is visible outside.
        read_z_collection = ops.get_collection("z")
        self.assertEqual(read_z_collection, [7])
class CondV2ContainerTest(test.TestCase):
  """Tests that resource containers propagate correctly into cond_v2 branches."""

  def testContainer(self):
    """Set containers outside & inside of cond_v2.

    Make sure the containers are set correctly for both variable creation
    (tested by variables.Variable) and for stateful ops (tested by FIFOQueue)
    """
    # Disabled pending internal bug b/113048653.
    self.skipTest("b/113048653")
    with ops.Graph().as_default() as g:
      with self.session(graph=g):

        # Created outside any container scope: container attr is empty.
        v0 = variables.Variable([0])
        q0 = data_flow_ops.FIFOQueue(1, dtypes.float32)

        def container(node):
          # Helper: read the "container" attr off the op that created node.
          return node.op.get_attr("container")

        self.assertEqual(compat.as_bytes(""), container(v0))
        self.assertEqual(compat.as_bytes(""), container(q0.queue_ref))

        def true_fn():
          # When this branch is created in cond below,
          # the container should begin with 'l1'
          v1 = variables.Variable([1])
          q1 = data_flow_ops.FIFOQueue(1, dtypes.float32)

          # Nested container scope inside the branch function.
          with ops.container("l2t"):
            v2 = variables.Variable([2])
            q2 = data_flow_ops.FIFOQueue(1, dtypes.float32)

          # After the nested scope exits, back to the outer 'l1'.
          v3 = variables.Variable([1])
          q3 = data_flow_ops.FIFOQueue(1, dtypes.float32)

          self.assertEqual(compat.as_bytes("l1"), container(v1))
          self.assertEqual(compat.as_bytes("l1"), container(q1.queue_ref))
          self.assertEqual(compat.as_bytes("l2t"), container(v2))
          self.assertEqual(compat.as_bytes("l2t"), container(q2.queue_ref))
          self.assertEqual(compat.as_bytes("l1"), container(v3))
          self.assertEqual(compat.as_bytes("l1"), container(q3.queue_ref))

          return constant_op.constant(2.0)

        def false_fn():
          # When this branch is created in cond below,
          # the container should begin with 'l1'
          v1 = variables.Variable([1])
          q1 = data_flow_ops.FIFOQueue(1, dtypes.float32)

          with ops.container("l2f"):
            v2 = variables.Variable([2])
            q2 = data_flow_ops.FIFOQueue(1, dtypes.float32)

          v3 = variables.Variable([1])
          q3 = data_flow_ops.FIFOQueue(1, dtypes.float32)

          self.assertEqual(compat.as_bytes("l1"), container(v1))
          self.assertEqual(compat.as_bytes("l1"), container(q1.queue_ref))
          self.assertEqual(compat.as_bytes("l2f"), container(v2))
          self.assertEqual(compat.as_bytes("l2f"), container(q2.queue_ref))
          self.assertEqual(compat.as_bytes("l1"), container(v3))
          self.assertEqual(compat.as_bytes("l1"), container(q3.queue_ref))

          return constant_op.constant(6.0)

        # Both conds are built inside the 'l1' container scope; the scope
        # must apply to ops created inside the branch functions too.
        with ops.container("l1"):
          cnd_true = cond_v2.cond_v2(
              constant_op.constant(True), true_fn, false_fn)
          self.assertEquals(cnd_true.eval(), 2)

          cnd_false = cond_v2.cond_v2(
              constant_op.constant(False), true_fn, false_fn)
          self.assertEquals(cnd_false.eval(), 6)

          # Still inside 'l1' for ops created directly in the scope.
          v4 = variables.Variable([3])
          q4 = data_flow_ops.FIFOQueue(1, dtypes.float32)

        # Outside the scope: container attr is empty again.
        v5 = variables.Variable([4])
        q5 = data_flow_ops.FIFOQueue(1, dtypes.float32)

        self.assertEqual(compat.as_bytes("l1"), container(v4))
        self.assertEqual(compat.as_bytes("l1"), container(q4.queue_ref))
        self.assertEqual(compat.as_bytes(""), container(v5))
        self.assertEqual(compat.as_bytes(""), container(q5.queue_ref))
class CondV2ColocationGroupAndDeviceTest(test.TestCase):
  """Tests colocation-group and device-scope propagation into cond_v2."""

  def testColocateWithBeforeCond(self):
    """An outer colocate_with scope applies to ops created in the branches."""
    with ops.Graph().as_default() as g:
      with self.session(graph=g):

        a = constant_op.constant([2.0], name="a")
        b = constant_op.constant([2.0], name="b")

        def fn():
          c = constant_op.constant(3.0)
          # The outer colocate_with(a.op) scope reaches into the branch.
          self.assertEqual([b"loc:@a"], c.op.colocation_groups())
          return c

        with ops.colocate_with(a.op):
          self.assertEquals(
              cond_v2.cond_v2(constant_op.constant(True), fn, fn).eval(), 3)

        def fn2():
          c = constant_op.constant(3.0)
          # Both nested colocation scopes are visible inside the branch.
          self.assertEqual([b"loc:@a", b"loc:@b"], c.op.colocation_groups())
          return c

        with ops.colocate_with(a.op):
          with ops.colocate_with(b.op):
            self.assertEquals(
                cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)

  def testColocateWithInAndOutOfCond(self):
    """colocate_with inside a branch stacks on the outer scope, then pops."""
    with ops.Graph().as_default() as g:
      with self.session(graph=g):

        a = constant_op.constant([2.0], name="a")
        b = constant_op.constant([2.0], name="b")

        def fn2():
          with ops.colocate_with(b.op):
            c = constant_op.constant(3.0)
            # Inner scope (b) stacks on the outer scope (a).
            self.assertEqual([b"loc:@a", b"loc:@b"], c.op.colocation_groups())
            return c

        with ops.colocate_with(a.op):
          self.assertEquals(
              cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)

          # After the cond, only the outer scope (a) remains active.
          d = constant_op.constant([2.0], name="d")
          self.assertEqual([b"loc:@a"], d.op.colocation_groups())

  def testColocateWithInCondGraphPartitioning(self):
    """Colocation inside a branch influences graph partitioning."""
    with ops.Graph().as_default() as g:
      with self.session(
          graph=g,
          config=config_pb2.ConfigProto(device_count={"CPU": 2})
      ) as sess:

        with ops.device("/device:CPU:0"):
          a = constant_op.constant([2.0], name="a")
        with ops.device("/device:CPU:1"):
          b = constant_op.constant([2.0], name="b")

        def fn():
          # Force c onto b's device even though only a is consumed.
          with ops.colocate_with(b.op):
            c = math_ops.add(a, a, name="c")
          return c
        out_cond_2 = cond_v2.cond_v2(constant_op.constant(True), fn, fn)

        run_options = config_pb2.RunOptions(output_partition_graphs=True)
        run_metadata = config_pb2.RunMetadata()
        sess.run(out_cond_2, options=run_options, run_metadata=run_metadata)

        # We expect there to be two partitions because of the
        # colocate_with. We are only running the cond, which has a data
        # dependency on `a` but not on `b`. So, without the colocate_with
        # we would expect execution on just one device.
        self.assertTrue(len(run_metadata.partition_graphs) >= 2)

  def testDeviceBeforeCond(self):
    """An outer device scope applies to op placement inside the branches."""
    with ops.Graph().as_default() as g:
      with self.session(graph=g):

        def fn():
          # Ops created in the branch carry no explicit device string...
          self.assertEqual("", constant_op.constant(3.0).op.device)
          # ...but the runtime places them per the outer scope.
          return test_ops.device_placement_op()

        with ops.device("/device:CPU:0"):
          self.assertIn(
              compat.as_bytes("CPU:0"),
              self.evaluate(cond_v2.cond_v2(constant_op.constant(True),
                                            fn, fn)))

        def fn2():
          self.assertEqual("", constant_op.constant(3.0).op.device)
          return test_ops.device_placement_op()

        if test_util.is_gpu_available():
          with ops.device("/device:GPU:0"):
            self.assertIn(
                compat.as_bytes("GPU:0"),
                self.evaluate(cond_v2.cond_v2(constant_op.constant(True),
                                              fn2, fn2)))
        else:
          self.skipTest("Test requires a GPU to check GPU device placement.")

  def testDeviceInAndOutOfCond(self):
    """A device scope inside a branch applies only within that scope."""
    with ops.Graph().as_default() as g:
      with self.session(
          graph=g, config=config_pb2.ConfigProto(device_count={"CPU": 2})):

        def fn2():
          with ops.device("/device:CPU:1"):
            c = constant_op.constant(3.0)
            self.assertEqual("/device:CPU:1", c.op.device)
            return c

        with ops.device("/device:CPU:0"):
          self.assertEquals(
              cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)

          # Outside the branch, the outer device scope is restored.
          d = constant_op.constant(4.0)
          self.assertEqual("/device:CPU:0", d.op.device)

  def testDeviceInCondGraphPartitioning(self):
    """A device scope inside a branch influences graph partitioning."""
    with ops.Graph().as_default() as g:
      with self.session(
          graph=g,
          config=config_pb2.ConfigProto(device_count={"CPU": 2})
      ) as sess:

        def fn():
          # Place the branch computation on the second CPU device.
          with ops.device("/device:CPU:1"):
            c = math_ops.add(a, a, name="c")
          return c

        with ops.device("/device:CPU:0"):
          a = constant_op.constant([2.0], name="a")
          out_cond_2 = cond_v2.cond_v2(constant_op.constant(True), fn, fn)

        run_options = config_pb2.RunOptions(output_partition_graphs=True)
        run_metadata = config_pb2.RunMetadata()
        sess.run(out_cond_2, options=run_options, run_metadata=run_metadata)

        # Two partitions: one per CPU device used by the graph.
        self.assertTrue(len(run_metadata.partition_graphs) >= 2)
def _cond(pred, true_fn, false_fn, name):
  """Dispatch to cond v1 or v2 based on the active control-flow context."""
  impl = control_flow_ops.cond if _is_old_cond() else cond_v2.cond_v2
  return impl(pred, true_fn, false_fn, name=name)
def _is_old_cond():
  """True iff the default graph's active context is a v1 CondContext."""
  ctxt = ops.get_default_graph()._get_control_flow_context()
  return isinstance(ctxt, control_flow_ops.CondContext)
if __name__ == "__main__":
  # Run all test cases defined in this module.
  test.main()
|
|
"""
oauthlib.oauth2.rfc6749.tokens
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains methods for adding two types of access tokens to requests.
- Bearer http://tools.ietf.org/html/rfc6750
- MAC http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01
"""
from __future__ import absolute_import, unicode_literals
from binascii import b2a_base64
import hashlib
import hmac
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from oauthlib.common import add_params_to_uri, add_params_to_qs, unicode_type
from oauthlib import common
from . import utils
class OAuth2Token(dict):
    """A dict of token parameters that tracks scope changes between grants."""

    def __init__(self, params, old_scope=None):
        super(OAuth2Token, self).__init__(params)
        # Scope granted by the server, if present and parseable.
        new_scope = None
        if 'scope' in params:
            try:
                new_scope = set(utils.scope_to_list(params['scope']))
            except TypeError:
                new_scope = None
        self._new_scope = new_scope
        if old_scope is None:
            # No prior scope known: treat the granted scope as both.
            self._old_scope = self._new_scope
        else:
            self._old_scope = set(utils.scope_to_list(old_scope))
            if self._new_scope is None:
                # the rfc says that if the scope hasn't changed, it's optional
                # in params so set the new scope to the old scope
                self._new_scope = self._old_scope

    @property
    def scope_changed(self):
        """Whether the granted scope differs from the requested one."""
        return self._new_scope != self._old_scope

    @property
    def old_scope(self):
        """The previously requested scope as a space-delimited string."""
        return utils.list_to_scope(self._old_scope)

    @property
    def old_scopes(self):
        """The previously requested scope as a list."""
        return list(self._old_scope)

    @property
    def scope(self):
        """The granted scope as a space-delimited string."""
        return utils.list_to_scope(self._new_scope)

    @property
    def scopes(self):
        """The granted scope as a list."""
        return list(self._new_scope)

    @property
    def missing_scopes(self):
        """Scopes that were requested but not granted."""
        return list(self._old_scope - self._new_scope)

    @property
    def additional_scopes(self):
        """Scopes that were granted but not requested."""
        return list(self._new_scope - self._old_scope)
def prepare_mac_header(token, uri, key, http_method,
                       nonce=None,
                       headers=None,
                       body=None,
                       ext='',
                       hash_algorithm='hmac-sha-1',
                       issue_time=None,
                       draft=0):
    """Add an `MAC Access Authentication`_ signature to headers.

    Unlike OAuth 1, this HMAC signature does not require inclusion of the
    request payload/body, neither does it use a combination of client_secret
    and token_secret but rather a mac_key provided together with the access
    token.

    Currently two algorithms are supported, "hmac-sha-1" and "hmac-sha-256",
    `extension algorithms`_ are not supported.

    Example MAC Authorization header, linebreaks added for clarity

    Authorization: MAC id="h480djs93hd8",
                       nonce="1336363200:dj83hs9s",
                       mac="bhCQXTVyfj5cmA9uKkPFx1zeOXM="

    .. _`MAC Access Authentication`: http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01
    .. _`extension algorithms`: http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01#section-7.1

    :param token: MAC token id placed in the ``id`` field of the header.
    :param uri: Request URI.
    :param key: MAC given provided by token endpoint.
    :param http_method: HTTP Request method.
    :param nonce: Optional nonce; generated when not supplied.
    :param headers: Request headers as a dictionary.
    :param body: Optional request payload; hashed only for draft 0.
    :param ext: Optional extension string included in the signature.
    :param hash_algorithm: HMAC algorithm provided by token endpoint.
    :param issue_time: Time when the MAC credentials were issued (datetime).
    :param draft: MAC authentication specification version.
    :return: headers dictionary with the authorization field added.
    """
    http_method = http_method.upper()
    host, port = utils.host_from_uri(uri)

    # Select the digest constructor matching the negotiated algorithm.
    if hash_algorithm.lower() == 'hmac-sha-1':
        h = hashlib.sha1
    elif hash_algorithm.lower() == 'hmac-sha-256':
        h = hashlib.sha256
    else:
        raise ValueError('unknown hash algorithm')

    if draft == 0:
        # Draft 0 nonce format: "<age>:<random>".
        nonce = nonce or '{0}:{1}'.format(utils.generate_age(issue_time),
                                          common.generate_nonce())
    else:
        # Later drafts use a separate timestamp plus a random nonce.
        ts = common.generate_timestamp()
        nonce = common.generate_nonce()

    sch, net, path, par, query, fra = urlparse(uri)

    if query:
        request_uri = path + '?' + query
    else:
        request_uri = path

    # Hash the body/payload
    if body is not None and draft == 0:
        body = body.encode('utf-8')
        bodyhash = b2a_base64(h(body).digest())[:-1].decode('utf-8')
    else:
        bodyhash = ''

    # Create the normalized base string
    base = []
    if draft == 0:
        base.append(nonce)
    else:
        base.append(ts)
        base.append(nonce)
    base.append(http_method.upper())
    base.append(request_uri)
    base.append(host)
    # NOTE(review): assumes utils.host_from_uri returns port as a string —
    # joining below would fail on an int; confirm against utils.
    base.append(port)
    if draft == 0:
        base.append(bodyhash)
    base.append(ext or '')
    base_string = '\n'.join(base) + '\n'

    # hmac struggles with unicode strings - http://bugs.python.org/issue5285
    if isinstance(key, unicode_type):
        key = key.encode('utf-8')
    sign = hmac.new(key, base_string.encode('utf-8'), h)
    # Strip the trailing newline appended by b2a_base64.
    sign = b2a_base64(sign.digest())[:-1].decode('utf-8')

    header = []
    header.append('MAC id="%s"' % token)
    if draft != 0:
        header.append('ts="%s"' % ts)
    header.append('nonce="%s"' % nonce)
    if bodyhash:
        header.append('bodyhash="%s"' % bodyhash)
    if ext:
        header.append('ext="%s"' % ext)
    header.append('mac="%s"' % sign)

    headers = headers or {}
    headers['Authorization'] = ', '.join(header)
    return headers
def prepare_bearer_uri(token, uri):
    """Add a `Bearer Token`_ to the request URI.

    Not recommended, use only if client can't use authorization header or body.

    http://www.example.com/path?access_token=h480djs93hd8

    .. _`Bearer Token`: http://tools.ietf.org/html/rfc6750
    """
    params = [('access_token', token)]
    return add_params_to_uri(uri, params)
def prepare_bearer_headers(token, headers=None):
    """Add a `Bearer Token`_ to the request headers.

    Recommended method of passing bearer tokens.

    Authorization: Bearer h480djs93hd8

    .. _`Bearer Token`: http://tools.ietf.org/html/rfc6750
    """
    if not headers:
        headers = {}
    headers['Authorization'] = 'Bearer %s' % token
    return headers
def prepare_bearer_body(token, body=''):
    """Add a `Bearer Token`_ to the request body.

    access_token=h480djs93hd8

    .. _`Bearer Token`: http://tools.ietf.org/html/rfc6750
    """
    params = [('access_token', token)]
    return add_params_to_qs(body, params)
def random_token_generator(request, refresh_token=False):
    """Default token generator: an unpredictable random string.

    Both arguments are accepted for interface compatibility but unused.
    """
    return common.generate_token()
def signed_token_generator(private_pem, **kwargs):
    """Return a token generator that signs tokens with *private_pem*.

    Extra keyword arguments are attached to every request as claims before
    the token is generated (presumably JWT claims — see
    common.generate_signed_token for the exact semantics).
    """
    # NOTE: the inner function deliberately reuses the outer name; only the
    # closure is exposed to callers.
    def signed_token_generator(request):
        request.claims = kwargs
        return common.generate_signed_token(private_pem, request)
    return signed_token_generator
class TokenBase(object):
    """Abstract interface for token handlers (e.g. BearerToken)."""

    def __call__(self, request, refresh_token=False):
        """Create a token for *request*; must be overridden."""
        raise NotImplementedError('Subclasses must implement this method.')

    def validate_request(self, request):
        """Validate the token carried by *request*; must be overridden."""
        raise NotImplementedError('Subclasses must implement this method.')

    def estimate_type(self, request):
        """Score how likely *request* carries this token type; must be overridden."""
        raise NotImplementedError('Subclasses must implement this method.')
class BearerToken(TokenBase):
    """Creates and validates RFC 6750 bearer tokens."""

    def __init__(self, request_validator=None, token_generator=None,
                 expires_in=None, refresh_token_generator=None):
        self.request_validator = request_validator
        # Fall back to random tokens when no generator is supplied; the
        # refresh-token generator defaults to the access-token generator.
        self.token_generator = token_generator or random_token_generator
        self.refresh_token_generator = (
            refresh_token_generator or self.token_generator
        )
        # Default lifetime: one hour.
        self.expires_in = expires_in or 3600

    def create_token(self, request, refresh_token=False):
        """Create a BearerToken, by default without refresh token."""
        # expires_in may be a fixed number or a callable taking the request.
        expires_in = (self.expires_in(request) if callable(self.expires_in)
                      else self.expires_in)
        request.expires_in = expires_in

        payload = {
            'access_token': self.token_generator(request),
            'expires_in': expires_in,
            'token_type': 'Bearer',
        }
        if request.scopes is not None:
            payload['scope'] = ' '.join(request.scopes)
        if request.state is not None:
            payload['state'] = request.state

        if refresh_token:
            # Re-issue the existing refresh token unless the validator
            # requests rotation (or there is none to re-issue).
            keep_old = (request.refresh_token and
                        not self.request_validator.rotate_refresh_token(request))
            if keep_old:
                payload['refresh_token'] = request.refresh_token
            else:
                payload['refresh_token'] = self.refresh_token_generator(request)

        payload.update(request.extra_credentials or {})
        token = OAuth2Token(payload)
        self.request_validator.save_bearer_token(token, request)
        return token

    def validate_request(self, request):
        """Validate the bearer token carried by *request*."""
        if 'Authorization' in request.headers:
            # Strip the leading 'Bearer ' prefix (7 characters).
            token = request.headers.get('Authorization')[7:]
        else:
            token = request.access_token
        return self.request_validator.validate_bearer_token(
            token, request.scopes, request)

    def estimate_type(self, request):
        """Score how likely *request* carries a bearer token (9, 5 or 0)."""
        auth_header = request.headers.get('Authorization', '')
        if auth_header.startswith('Bearer'):
            return 9
        if request.access_token is not None:
            return 5
        return 0
|
|
# -*- coding: utf-8 -*-
""" Sahana Eden Menu Structure and Layout
@copyright: 2011-2013 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3MainMenu",
"S3OptionsMenu"
]
import re
from gluon import *
from gluon.storage import Storage
from s3 import *
from s3layouts import *
# =============================================================================
class S3MainMenu(object):
    """ The default configurations for the main application menu """

    @classmethod
    def menu(cls):
        """ Compose the main menu from the module menu (left) and the
            service menus (right) """

        main_menu = MM()(
            # Modules-menu, align-left
            cls.menu_modules(),
            # Service menus, align-right
            # Note: always define right-hand items in reverse order!
            cls.menu_help(right=True),
            cls.menu_auth(right=True),
            cls.menu_lang(right=True),
            cls.menu_admin(right=True),
            cls.menu_gis(right=True)
        )
        return main_menu

    # -------------------------------------------------------------------------
    @classmethod
    def menu_modules(cls):
        """ Modules menu: home first, then visible modules ordered by
            module_type, with type-10 modules under a "more" sub-menu """

        # ---------------------------------------------------------------------
        # Modules Menu
        # @todo: this is very ugly - cleanup or make a better solution
        # @todo: probably define the menu explicitly?
        #
        menu_modules = []
        all_modules = current.deployment_settings.modules

        # Home always 1st
        module = all_modules["default"]
        menu_modules.append(MM(module.name_nice, c="default", f="index"))

        auth = current.auth

        # Modules to hide due to insufficient permissions
        hidden_modules = auth.permission.hidden_modules()

        has_role = auth.s3_has_role

        # The Modules to display at the top level (in order)
        for module_type in [1, 2, 3, 4, 5, 6, 7, 8, 9]:
            for module in all_modules:
                if module in hidden_modules:
                    continue
                _module = all_modules[module]
                if (_module.module_type == module_type):
                    if not _module.access:
                        menu_modules.append(MM(_module.name_nice, c=module, f="index"))
                    else:
                        # Access strings look like "|1|2|": split on the
                        # literal pipe and drop the empty outer elements.
                        # NB: raw string — "\|" is an invalid escape
                        # sequence in an ordinary string literal.
                        groups = re.split(r"\|", _module.access)[1:-1]
                        menu_modules.append(MM(_module.name_nice,
                                               c=module,
                                               f="index",
                                               restrict=groups))

        # Modules to display off the 'more' menu
        modules_submenu = []
        for module in all_modules:
            if module in hidden_modules:
                continue
            _module = all_modules[module]
            if (_module.module_type == 10):
                if not _module.access:
                    modules_submenu.append(MM(_module.name_nice, c=module, f="index"))
                else:
                    groups = re.split(r"\|", _module.access)[1:-1]
                    modules_submenu.append(MM(_module.name_nice,
                                              c=module,
                                              f="index",
                                              restrict=groups))
        if modules_submenu:
            # Only show the 'more' menu if there are entries in the list
            module_more_menu = MM("more", link=False)(modules_submenu)
            menu_modules.append(module_more_menu)

        return menu_modules

    # -------------------------------------------------------------------------
    @classmethod
    def menu_lang(cls, **attr):
        """ Language menu """

        settings = current.deployment_settings
        if not settings.get_L10n_display_toolbar():
            return None

        languages = current.response.s3.l10n_languages
        request = current.request

        menu_lang = MM("Language", **attr)
        for language in languages:
            # Language names are displayed untranslated (translate=False)
            menu_lang.append(MM(languages[language], r=request,
                                translate=False,
                                vars={"_language":language},
                                ltr=True
                                ))
        return menu_lang

    # -------------------------------------------------------------------------
    @classmethod
    def menu_help(cls, **attr):
        """ Help Menu """

        menu_help = MM("Help", c="default", f="help", **attr)(
            MM("Contact us", f="contact"),
            MM("About", f="about")
        )

        # -------------------------------------------------------------------
        # Now add the available guided tours to the help menu
        # check that a guided_tour is enabled
        if current.deployment_settings.get_base_guided_tour():
            # load the guided tour configuration from the database
            table = current.s3db.tour_config
            logged_in = current.auth.is_logged_in()
            # Anonymous users only see tours with no role restriction
            if logged_in:
                query = (table.deleted == False) &\
                        (table.role != "")
            else:
                query = (table.deleted == False) &\
                        (table.role == "")
            tours = current.db(query).select(table.id,
                                             table.name,
                                             table.controller,
                                             table.function,
                                             table.role,
                                             )
            if len(tours) > 0:
                menu_help.append(SEP())
            for row in tours:
                menu_help.append(MM(row.name,
                                    c=row.controller,
                                    f=row.function,
                                    vars={"tour":row.id},
                                    restrict=row.role
                                    )
                                 )
        return menu_help

    # -------------------------------------------------------------------------
    @classmethod
    def menu_auth(cls, **attr):
        """ Auth Menu """

        auth = current.auth
        logged_in = auth.is_logged_in()
        self_registration = current.deployment_settings.get_security_self_registration()

        if not logged_in:
            request = current.request
            login_next = URL(args=request.args, vars=request.vars)
            # Honour an explicit post-login redirect if one was given
            if request.controller == "default" and \
               request.function == "user" and \
               "_next" in request.get_vars:
                login_next = request.get_vars["_next"]

            menu_auth = MM("Login", c="default", f="user", m="login",
                           _id="auth_menu_login",
                           vars=dict(_next=login_next), **attr)(
                                MM("Login", m="login",
                                   vars=dict(_next=login_next)),
                                MM("Register", m="register",
                                   vars=dict(_next=login_next),
                                   check=self_registration),
                                MM("Lost Password", m="retrieve_password")
                            )
        else:
            # Logged-in
            menu_auth = MM(auth.user.email, c="default", f="user",
                           translate=False, link=False, _id="auth_menu_email",
                           **attr)(
                                MM("Logout", m="logout", _id="auth_menu_logout"),
                                MM("User Profile", m="profile"),
                                MM("Personal Data", c="default", f="person", m="update"),
                                MM("Contact Details", c="pr", f="person",
                                   args="contact",
                                   vars={"person.pe_id" : auth.user.pe_id}),
                                #MM("Subscriptions", c="pr", f="person",
                                   #args="pe_subscription",
                                   #vars={"person.pe_id" : auth.user.pe_id}),
                                MM("Change Password", m="change_password"),
                                SEP(),
                                MM({"name": current.T("Rapid Data Entry"),
                                    "id": "rapid_toggle",
                                    "value": current.session.s3.rapid_data_entry is True},
                                   f="rapid"),
                            )

        return menu_auth

    # -------------------------------------------------------------------------
    @classmethod
    def menu_admin(cls, **attr):
        """ Administrator Menu """

        ADMIN = current.session.s3.system_roles.ADMIN
        settings = current.deployment_settings
        name_nice = settings.modules["admin"].name_nice
        translate = settings.has_module("translate")

        menu_admin = MM(name_nice, c="admin",
                        restrict=[ADMIN], **attr)(
                            MM("Settings", f="setting"),
                            MM("Users", f="user"),
                            MM("Person Registry", c="pr"),
                            MM("Database", c="appadmin", f="index"),
                            MM("Error Tickets", f="errors"),
                            MM("Synchronization", c="sync", f="index"),
                            MM("Translation", c="admin", f="translate",
                               check=translate),
                            MM("Test Results", f="result"),
                        )

        return menu_admin

    # -------------------------------------------------------------------------
    @classmethod
    def menu_gis(cls, **attr):
        """ GIS Config Menu """

        settings = current.deployment_settings
        if not settings.get_gis_menu():
            return None

        T = current.T
        db = current.db
        auth = current.auth
        s3db = current.s3db
        request = current.request
        s3 = current.session.s3
        _config = s3.gis_config_id

        # See if we need to switch config before we decide which
        # config item to mark as active:
        if "_config" in request.get_vars:
            # The user has just selected a config from the GIS menu
            try:
                config = int(request.get_vars._config)
            except ValueError:
                # Manually-crafted URL?
                pass
            else:
                if _config is None or _config != config:
                    # Set this as the current config
                    s3.gis_config_id = config
                    cfg = current.gis.get_config()
                    s3.location_filter = cfg.region_location_id
                    if settings.has_module("event"):
                        # See if this config is associated with an Event
                        table = s3db.event_config
                        query = (table.config_id == config)
                        incident = db(query).select(table.incident_id,
                                                    limitby=(0, 1)).first()
                        if incident:
                            s3.event = incident.incident_id
                        else:
                            s3.event = None
            # Don't use the outdated cache for this call
            cache = None
        else:
            cache = s3db.cache

        # Check if there are multiple GIS Configs for the user to switch between
        table = s3db.gis_menu
        ctable = s3db.gis_config
        query = (table.pe_id == None)
        if auth.is_logged_in():
            # @ToDo: Search for OUs too (API call)
            query |= (table.pe_id == auth.user.pe_id)
        query &= (table.config_id == ctable.id)
        configs = db(query).select(ctable.id, ctable.name, cache=cache)

        gis_menu = MM(settings.get_gis_menu(),
                      c=request.controller,
                      f=request.function,
                      **attr)
        args = request.args
        if len(configs):
            # Use short names for the site and personal configs else they'll wrap.
            # Provide checkboxes to select between pages
            gis_menu(
                MM({"name": T("Default"),
                    "id": "gis_menu_id_0",
                    # @ToDo: Show when default item is selected without having
                    # to do a DB query to read the value
                    #"value": _config is 0,
                    "request_type": "load"
                    }, args=args, vars={"_config": 0}
                   )
            )
            for config in configs:
                gis_menu(
                    MM({"name": config.name,
                        "id": "gis_menu_id_%s" % config.id,
                        "value": _config == config.id,
                        "request_type": "load"
                        }, args=args, vars={"_config": config.id}
                       )
                )
        return gis_menu
# =============================================================================
class S3OptionsMenu(object):
"""
The default configurations for options menus
Define one function per controller with the controller prefix as
function name and with "self" as its only argument (must be an
instance method!), and let it return the controller menu
definition as an instance of the layout (=an S3NavigationItem
subclass, standard: M).
In the standard layout, the main item in a controller menu does
not have a label. If you want to re-use a menu for multiple
controllers, do *not* define a controller setting (c="xxx") in
the main item.
"""
def __init__(self, name):
""" Constructor """
try:
self.menu = getattr(self, name)()
except:
self.menu = None
# -------------------------------------------------------------------------
    def admin(self):
        """ ADMIN menu """

        ADMIN = current.session.s3.system_roles.ADMIN
        # NOTE(review): settings_messaging() is defined elsewhere on this
        # class — confirm it returns menu items suitable for nesting here.
        settings_messaging = self.settings_messaging()

        translate = current.deployment_settings.has_module("translate")

        # ATTN: Do not specify a controller for the main menu to allow
        # re-use of this menu by other controllers
        return M(restrict=[ADMIN])(
                    M("Settings", c="admin", f="setting")(
                        settings_messaging,
                    ),
                    M("User Management", c="admin", f="user")(
                        M("New User", m="create"),
                        M("List All Users"),
                        M("Import Users", m="import"),
                        M("List All Roles", f="role"),
                        M("List All Organization Approvers & Whitelists", f="organisation"),
                        #M("Roles", f="group"),
                        #M("Membership", f="membership"),
                    ),
                    M("Database", c="appadmin", f="index")(
                        M("Raw Database access", c="appadmin", f="index")
                    ),
                    M("Error Tickets", c="admin", f="errors"),
                    M("Synchronization", c="sync", f="index")(
                        M("Settings", f="config", args=[1], m="update"),
                        M("Repositories", f="repository"),
                        M("Log", f="log"),
                    ),
                    #M("Edit Application", a="admin", c="default", f="design",
                      #args=[request.application]),
                    # Translation menu only shown when the module is enabled
                    M("Translation", c="admin", f="translate", check=translate)(
                        M("Select Modules for translation", c="admin", f="translate",
                          m="create", vars=dict(opt="1")),
                        M("Upload translated files", c="admin", f="translate",
                          m="create", vars=dict(opt="2")),
                        M("View Translation Percentage", c="admin", f="translate",
                          m="create", vars=dict(opt="3")),
                        M("Add strings manually", c="admin", f="translate",
                          m="create", vars=dict(opt="4"))
                    ),
                    M("View Test Result Reports", c="admin", f="result"),
                    M("Portable App", c="admin", f="portable")
                )
# -------------------------------------------------------------------------
    def assess(self):
        """ ASSESS Menu """

        # ADMIN is currently only referenced by the commented-out
        # "Edit Options" section below.
        ADMIN = current.session.s3.system_roles.ADMIN

        return M(c="assess")(
                    M("Building Assessments", f="building")(
                        M("New", m="create"),
                        M("List All"),
                        M("Search", m="search"),
                        M("Map", m="map"),
                    ),
                    M("Canvassing", f="canvass")(
                        M("New", m="create"),
                        M("List All"),
                        M("Search", m="search"),
                        M("Map", m="map"),
                    ),
                    #M("Rapid Assessments", f="rat")(
                    #    M("New", m="create"),
                    #    M("List All"),
                    #    #M("Search", m="search"),
                    #),
                    #M("Impact Assessments", f="assess")(
                    #    #M("New", m="create"),
                    #    M("New", f="basic_assess", p="create"),
                    #    M("List All"),
                    #    M("Mobile", f="mobile_basic_assess"),
                    #    #M("Search", m="search"),
                    #),
                    ##M("Baseline Data")(
                    #    #M("Population", f="population"),
                    ##),
                    #M("Edit Options", restrict=ADMIN)(
                    #    M("List / Add Baseline Types", f="baseline_type"),
                    #    M("List / Add Impact Types", f="impact_type"),
                    #)
                )
# -------------------------------------------------------------------------
    def asset(self):
        """ ASSET Controller """

        ADMIN = current.session.s3.system_roles.ADMIN

        return M(c="asset")(
                    M("Assets", f="asset")(
                        M("New", m="create"),
                        M("List All"),
                        M("Search", m="search"),
                        M("Report", m="report"),
                        M("Import", m="import", p="create"),
                    ),
                    #M("Brands", f="brand",
                    #  restrict=[ADMIN])(
                    #    M("New", m="create"),
                    #    M("List All"),
                    #),
                    M("Items", f="item")(
                        M("New", m="create"),
                        M("List All"),
                        M("Search", m="search"),
                        M("Report", m="report"),
                        M("Import", f="catalog_item", m="import", p="create"),
                    ),
                    # Category/catalog maintenance is restricted to admins
                    M("Item Categories", f="item_category",
                      restrict=[ADMIN])(
                        M("New", m="create"),
                        M("List All"),
                    ),
                    M("Catalogs", f="catalog",
                      restrict=[ADMIN])(
                        M("New", m="create"),
                        M("List All"),
                        #M("Search", m="search"),
                    ),
                    M("Suppliers", f="supplier")(
                        M("New", m="create"),
                        M("List All"),
                        M("Search", m="search"),
                        M("Import", m="import", p="create"),
                    ),
                )
# -------------------------------------------------------------------------
def budget(self):
""" BUDGET Controller """
return M(c="budget")(
M("Parameters", f="parameters"),
M("Items", f="item")(
M("New", m="create"),
M("List"),
),
M("Kits", f="kit")(
M("New", m="create"),
M("List"),
),
M("Bundles", f="bundle")(
M("New", m="create"),
M("List"),
),
M("Staff", f="staff")(
M("New", m="create"),
M("List"),
),
M("Locations", f="location")(
M("New", m="create"),
M("List"),
),
M("Projects", f="project")(
M("New", m="create"),
M("List"),
),
M("Budgets", f="budget")(
M("New", m="create"),
M("List"),
)
)
# -------------------------------------------------------------------------
def building(self):
""" BUILDING Controller """
return M(c="building")(
M("NZSEE Level 1", f="nzseel1")(
M("Submit New (triage)", m="create",
vars={"triage":1}),
M("Submit New (full form)", m="create"),
M("List"),
M("Search", m="search"),
),
M("NZSEE Level 2", f="nzseel2")(
M("Submit New", m="create"),
M("List"),
M("Search", m="search"),
),
M("Report", f="index")(
M("Snapshot", f="report"),
M("Assessment timeline", f="timeline"),
M("Assessment admin level", f="adminLevel"),
),
)
# -------------------------------------------------------------------------
def cap(self):
""" CAP menu """
T = current.T
session = current.session
ADMIN = session.s3.system_roles.ADMIN
return M(c="cap")(
M("Alerts", f="alert", vars={'alert.is_template': 'false'})(
M("List alerts", f="alert", vars={'alert.is_template': 'false'}),
M("Create alert", f="alert", m="create"),
M("Search & Subscribe", m="search"),
),
M("Templates", f="template", vars={'alert.is_template': 'true'})(
M("List templates", f="template", vars={'alert.is_template': 'true'}),
M("Create template", f="template", m="create"),
),
#M("CAP Profile", f="profile")(
# M("Edit profile", f="profile")
#)
)
# -------------------------------------------------------------------------
def cr(self):
""" CR / Shelter Registry """
ADMIN = current.session.s3.system_roles.ADMIN
if current.deployment_settings.get_ui_label_camp():
shelter = "Camps"
types = "Camp Settings"
else:
shelter = "Shelters"
types = "Shelter Settings"
return M(c="cr")(
M(shelter, f="shelter")(
M("New", m="create"),
M("List All"),
M("Map", m="map"),
#M("Search", m="search"),
M("Report", m="report2"),
M("Import", m="import", p="create"),
),
M(types, restrict=[ADMIN])(
M("Types", f="shelter_type"),
M("Services", f="shelter_service"),
)
)
# -------------------------------------------------------------------------
def cms(self):
""" CMS / Content Management System """
return M(c="cms")(
M("Series", f="series")(
M("New", m="create"),
M("List All"),
M("View as Pages", f="blog"),
),
M("Posts", f="post")(
M("New", m="create"),
M("List All"),
M("View as Pages", f="page"),
),
)
# -------------------------------------------------------------------------
def delphi(self):
""" DELPHI / Delphi Decision Maker """
ADMIN = current.session.s3.system_roles.ADMIN
return M(c="delphi")(
M("Active Problems", f="problem")(
M("New", m="create"),
M("List All"),
),
M("Groups", f="group")(
M("New", m="create"),
M("List All"),
),
#M("Solutions", f="solution"),
#M("Administration", restrict=[ADMIN])(
#M("Groups", f="group"),
#M("Group Memberships", f="membership"),
#M("Problems", f="problem"),
#)
)
# -------------------------------------------------------------------------
def deploy(self):
""" Deployments """
return M()(
M("Human Resources",
c="deploy", f="human_resource", m="summary"),
)
# -------------------------------------------------------------------------
def doc(self):
""" DOC Menu """
return M(c="doc")(
M("Documents", f="document")(
M("New", m="create"),
M("List All"),
#M("Search", m="search")
),
M("Photos", f="image")(
M("New", m="create"),
M("List All"),
#M("Bulk Uploader", f="bulk_upload"),
#M("Search", m="search")
)
)
# -------------------------------------------------------------------------
def dvi(self):
""" DVI / Disaster Victim Identification """
return M(c="dvi")(
#M("Home", f="index"),
M("Body Recovery", f="recreq")(
M("New Request", m="create"),
M("List Current",
vars={"recreq.status":"1,2,3"}),
M("List All"),
),
M("Dead Bodies", f="body")(
M("New", m="create"),
M("List all"),
M("List unidentified",
vars=dict(status="unidentified")),
M("Search", m="search"),
M("Report by Age/Gender", m="report",
vars=dict(rows="age_group",
cols="gender",
fact="pe_label",
aggregate="count")),
),
M("Missing Persons", f="person")(
M("List all"),
),
M("Morgues", f="morgue")(
M("New", m="create"),
M("List All"),
),
M("Dashboard", f="index"),
)
# -------------------------------------------------------------------------
def dvr(self):
""" DVR Menu """
return M(c="dvr")(
M("Cases", f="case")(
M("New", m="create"),
M("List All"),
#M("Search", m="search")
),
)
# -------------------------------------------------------------------------
def event(self):
""" EVENT / Event Module """
return M()(
M("Scenarios", c="scenario", f="scenario")(
M("New", m="create"),
M("Import", m="import", p="create"),
M("View All"),
),
M("Events", c="event", f="event")(
M("New", m="create"),
M("View All"),
),
M("Incidents", c="event", f="incident")(
M("New", m="create"),
M("View All"),
),
M("Incident Types", c="event", f="incident_type")(
M("New", m="create"),
M("Import", m="import", p="create"),
M("View All"),
),
)
# -------------------------------------------------------------------------
def fire(self):
""" FIRE """
return M(c="fire")(
M("Fire Stations", f="station")(
M("New", m="create"),
M("List All"),
M("Map", m="map"),
M("Search", m="search"),
M("Import Stations", m="import"),
M("Import Vehicles", f="station_vehicle", m="import"),
),
M("Fire Zones", f="zone")(
M("New", m="create"),
M("List All"),
#M("Map", m="map"),
#M("Search", m="search"),
#M("Import", m="import"),
),
M("Zone Types", f="zone_type")(
M("New", m="create"),
M("List All"),
#M("Map", m="map"),
#M("Search", m="search"),
#M("Import", m="import"),
),
M("Water Sources", f="water_source")(
M("New", m="create"),
M("List All"),
M("Map", m="map"),
M("Search", m="search"),
M("Import", m="import"),
),
M("Hazard Points", f="hazard_point")(
M("New", m="create"),
M("List All"),
M("Search", m="search"),
M("Import", m="import"),
)
)
# -------------------------------------------------------------------------
def flood(self):
""" FLOOD """
return M(c="flood")(
M("Gauges", f="gauge")(
M("New", m="create"),
M("List All"),
M("Map", m="map"),
#M("Search", m="search"),
M("Import", m="import"),
),
)
# -------------------------------------------------------------------------
def gis(self):
""" GIS / GIS Controllers """
MAP_ADMIN = current.session.s3.system_roles.MAP_ADMIN
gis_menu = current.deployment_settings.get_gis_menu()
def config_menu(i):
auth = current.auth
if not auth.is_logged_in():
# Anonymous users can never cofnigure the Map
return False
s3db = current.s3db
if auth.s3_has_permission("create",
s3db.gis_config):
# If users can create configs then they can see the menu item
return True
# Look for this user's config
table = s3db.gis_config
query = (table.pe_id == auth.user.pe_id)
config = current.db(query).select(table.id,
limitby=(0, 1),
cache=s3db.cache).first()
if config:
return True
def config_args():
auth = current.auth
if not auth.user:
# Won't show anyway due to check
return []
if auth.s3_has_role(MAP_ADMIN):
# Full List
return []
# Look for this user's config
s3db = current.s3db
table = s3db.gis_config
query = (table.pe_id == auth.user.pe_id)
config = current.db(query).select(table.id,
limitby=(0, 1),
cache=s3db.cache).first()
if config:
# Link direct to the User's config
return [config.id, "layer_entity"]
# Link to the Create form
return ["create"]
return M(c="gis")(
M("Fullscreen Map", f="map_viewing_client"),
# Currently not got geocoding support
#M("Bulk Uploader", c="doc", f="bulk_upload"),
M("Locations", f="location")(
M("Add Location", m="create"),
#M("Add Location Group", m="create", vars={"group": 1}),
M("List All"),
M("Import from CSV", m="import", restrict=[MAP_ADMIN]),
M("Import from OpenStreetMap", m="import_poi",
restrict=[MAP_ADMIN]),
#M("Geocode", f="geocode_manual"),
),
#M("Population Report", f="location", m="report",
# vars=dict(rows="name",
# fact="population",
# aggregate="sum")),
M("Configuration", f="config", args=config_args(),
_id="gis_menu_config",
check=config_menu),
M("Admin", restrict=[MAP_ADMIN])(
M("Hierarchy", f="hierarchy"),
M("Layers", f="catalog"),
M("Markers", f="marker"),
M("Menu", f="menu",
check=[gis_menu]),
M("Projections", f="projection"),
M("Symbology", f="symbology"),
)
)
# -------------------------------------------------------------------------
def hms(self):
""" HMS / Hospital Status Assessment and Request Management """
#s3 = current.response.s3
return M(c="hms")(
M("Hospitals", f="hospital", m="search")(
M("New", m="create"),
M("List All"),
M("Map", m="map"),
#M("Search", m="search"),
M("Report", m="report2"),
M("Import", m="import", p="create"),
#SEP(),
#M("Show Map", c="gis", f="map_viewing_client",
#vars={"kml_feed" : "%s/hms/hospital.kml" %
#s3.base_url, "kml_name" : "Hospitals_"})
)
)
# -------------------------------------------------------------------------
def hrm(self):
""" HRM / Human Resources Management """
s3 = current.session.s3
ADMIN = s3.system_roles.ADMIN
# Custom conditions for the check-hook, as lambdas in order
# to have them checked only immediately before rendering:
manager_mode = lambda i: s3.hrm.mode is None
personal_mode = lambda i: s3.hrm.mode is not None
is_org_admin = lambda i: s3.hrm.orgs and True or \
ADMIN in s3.roles
settings = current.deployment_settings
teams = settings.get_hrm_teams()
use_teams = lambda i: teams
vol_enabled = lambda i: settings.has_module("vol")
return M(c="hrm")(
M(settings.get_hrm_staff_label(), f="staff",
check=manager_mode)(
M("New", m="create"),
M("List All"),
M("Search", m="search"),
M("Search by skills", f="competency", args=["search"]),
M("Import", f="person", m="import",
vars={"group":"staff"}, p="create"),
),
M("Staff & Volunteers (Combined)",
c="hrm", f="human_resource", m="summary",
check=[manager_mode, vol_enabled]),
M(teams, f="group",
check=[manager_mode, use_teams])(
M("New", m="create"),
M("List All"),
M("Search Members", f="group_membership"),
M("Import", f="group_membership", m="import"),
),
M("Department Catalog", f="department",
check=manager_mode)(
M("New", m="create"),
M("List All"),
),
M("Job Title Catalog", f="job_title",
check=manager_mode)(
M("New", m="create"),
M("List All"),
),
M("Skill Catalog", f="skill",
check=manager_mode)(
M("New", m="create"),
M("List All"),
#M("Skill Provisions", f="skill_provision"),
),
M("Training Events", f="training_event",
check=manager_mode)(
M("New", m="create"),
M("List All"),
M("Search Training Participants", f="training"),
M("Import Participant List", f="training", m="import"),
),
M("Training Course Catalog", f="course",
check=manager_mode)(
M("New", m="create"),
M("List All"),
#M("Course Certificates", f="course_certificate"),
),
M("Certificate Catalog", f="certificate",
check=manager_mode)(
M("New", m="create"),
M("List All"),
#M("Skill Equivalence", f="certificate_skill"),
),
M("Reports", f="staff", m="report",
check=manager_mode)(
M("Staff Report", m="report"),
M("Expiring Staff Contracts Report",
vars=dict(expiring=1)),
M("Training Report", f="training", m="report2"),
),
M("Personal Profile", f="person",
check=personal_mode, vars=dict(mode="personal")),
# This provides the link to switch to the manager mode:
M("Staff Management", f="index",
check=[personal_mode, is_org_admin]),
# This provides the link to switch to the personal mode:
M("Personal Profile", f="person",
check=manager_mode, vars=dict(mode="personal"))
)
# -------------------------------------------------------------------------
def vol(self):
""" Volunteer Management """
s3 = current.session.s3
ADMIN = s3.system_roles.ADMIN
# Custom conditions for the check-hook, as lambdas in order
# to have them checked only immediately before rendering:
manager_mode = lambda i: s3.hrm.mode is None
personal_mode = lambda i: s3.hrm.mode is not None
is_org_admin = lambda i: s3.hrm.orgs and True or \
ADMIN in s3.roles
settings = current.deployment_settings
show_programmes = lambda i: settings.get_hrm_vol_experience() == "programme"
show_tasks = lambda i: settings.has_module("project") and \
settings.get_project_mode_task()
teams = settings.get_hrm_teams()
use_teams = lambda i: teams
show_staff = lambda i: settings.get_hrm_show_staff()
return M(c="vol")(
M("Volunteers", f="volunteer",
check=[manager_mode])(
M("New", m="create"),
M("List All"),
M("Search", m="search"),
M("Search by skills", f="competency", args=["search"]),
M("Import", f="person", m="import",
vars={"group":"volunteer"}, p="create"),
),
M("Staff & Volunteers (Combined)",
c="vol", f="human_resource", m="summary",
check=[manager_mode, show_staff]),
M(teams, f="group",
check=[manager_mode, use_teams])(
M("New", m="create"),
M("List All"),
M("Search Members", f="group_membership"),
M("Import", f="group_membership", m="import"),
),
M("Department Catalog", f="department",
check=manager_mode)(
M("New", m="create"),
M("List All"),
),
M("Volunteer Role Catalog", f="job_title",
check=manager_mode)(
M("New", m="create"),
M("List All"),
),
M("Skill Catalog", f="skill",
check=manager_mode)(
M("New", m="create"),
M("List All"),
#M("Skill Provisions", f="skill_provision"),
),
M("Training Events", f="training_event",
check=manager_mode)(
M("New", m="create"),
M("List All"),
M("Search Training Participants", f="training"),
M("Import Participant List", f="training", m="import"),
),
M("Training Course Catalog", f="course",
check=manager_mode)(
M("New", m="create"),
M("List All"),
#M("Course Certificates", f="course_certificate"),
),
M("Certificate Catalog", f="certificate",
check=manager_mode)(
M("New", m="create"),
M("List All"),
#M("Skill Equivalence", f="certificate_skill"),
),
M("Programs", f="programme",
check=[manager_mode, show_programmes])(
M("New", m="create"),
M("List All"),
M("Import Hours", f="programme_hours", m="import"),
),
M("Reports", f="volunteer", m="report",
check=manager_mode)(
M("Volunteer Report", m="report"),
M("Hours by Role Report", f="programme_hours", m="report2",
vars=Storage(rows="job_title_id",
cols="month",
fact="sum(hours)"),
check=show_programmes),
M("Hours by Program Report", f="programme_hours", m="report2",
vars=Storage(rows="programme_id",
cols="month",
fact="sum(hours)"),
check=show_programmes),
M("Training Report", f="training", m="report2"),
),
M("My Profile", f="person",
check=personal_mode, vars=dict(mode="personal")),
M("My Tasks", f="task",
check=[personal_mode, show_tasks],
vars=dict(mode="personal",
mine=1)),
# This provides the link to switch to the manager mode:
M("Volunteer Management", f="index",
check=[personal_mode, is_org_admin]),
# This provides the link to switch to the personal mode:
M("Personal Profile", f="person",
check=manager_mode, vars=dict(mode="personal"))
)
# -------------------------------------------------------------------------
def inv(self):
""" INV / Inventory """
ADMIN = current.session.s3.system_roles.ADMIN
current.s3db.inv_recv_crud_strings()
crud_strings = current.response.s3.crud_strings
inv_recv_list = crud_strings.inv_recv.title_list
inv_recv_search = crud_strings.inv_recv.title_search
use_commit = lambda i: current.deployment_settings.get_req_use_commit()
return M()(
#M("Home", f="index"),
M("Warehouses", c="inv", f="warehouse")(
M("New", m="create"),
M("List All"),
M("Search", m="search"),
M("Import", m="import", p="create"),
),
M("Warehouse Stock", c="inv", f="inv_item")(
M("Search", f="inv_item", m="search"),
M("Search Shipped Items", f="track_item", m="search"),
M("Adjust Stock Levels", f="adj"),
M("Kitting", f="kit"),
M("Import", f="inv_item", m="import", p="create"),
),
M("Reports", c="inv", f="inv_item")(
M("Warehouse Stock", f="inv_item",m="report"),
M("Expiration Report", c="inv", f="track_item",
m="search", vars=dict(report="exp")),
M("Monetization Report", c="inv", f="inv_item",
m="search", vars=dict(report="mon")),
M("Utilization Report", c="inv", f="track_item",
m="search", vars=dict(report="util")),
M("Summary of Incoming Supplies", c="inv", f="track_item",
m="search", vars=dict(report="inc")),
M("Summary of Releases", c="inv", f="track_item",
m="search", vars=dict(report="rel")),
),
M(inv_recv_list, c="inv", f="recv")(
M("New", m="create"),
M("List All"),
M("Search", m="search"),
M("timeline", args="timeline"),
),
M("Sent Shipments", c="inv", f="send")(
M("New", m="create"),
M("List All"),
M("Search", m="search"),
M("Search Shipped Items", f="track_item", m="search"),
M("timeline", args="timeline"),
),
M("Items", c="supply", f="item")(
M("New", m="create"),
M("List All"),
M("Search", m="search"),
M("Report", m="report"),
M("Import", f="catalog_item", m="import", p="create"),
),
# Catalog Items moved to be next to the Item Categories
#M("Catalog Items", c="supply", f="catalog_item")(
#M("New", m="create"),
#M("List All"),
#M("Search", m="search"),
#),
#M("Brands", c="supply", f="brand",
# restrict=[ADMIN])(
# M("New", m="create"),
# M("List All"),
#),
M("Catalogs", c="supply", f="catalog")(
M("New", m="create"),
M("List All"),
#M("Search", m="search"),
),
M("Item Categories", c="supply", f="item_category",
restrict=[ADMIN])(
M("New", m="create"),
M("List All"),
),
M("Suppliers", c="inv", f="supplier")(
M("New", m="create"),
M("List All"),
M("Search", m="search"),
M("Import", m="import", p="create"),
),
M("Facilities", c="inv", f="facility")(
M("New", m="create", t="org_facility"),
M("List All"),
#M("Search", m="search"),
),
M("Facility Types", c="inv", f="facility_type",
restrict=[ADMIN])(
M("New", m="create"),
M("List All"),
#M("Search", m="search"),
),
M("Requests", c="req", f="req")(
M("New", m="create"),
M("List All"),
M("Requested Items", f="req_item"),
#M("Search Requested Items", f="req_item", m="search"),
),
M("Commitments", c="req", f="commit", check=use_commit)(
M("List All")
),
)
# -------------------------------------------------------------------------
def irs(self):
""" IRS / Incident Report System """
ADMIN = current.session.s3.system_roles.ADMIN
return M(c="irs")(
M("Incident Reports", f="ireport")(
M("Add Incident Report", m="create"),
M("List All"),
M("Open Incidents", vars={"open":1}),
M("Map", m="map"),
M("Timeline", args="timeline"),
M("Import", m="import"),
M("Report", m="report2")
),
M("Incident Categories", f="icategory", restrict=[ADMIN])(
M("New", m="create"),
M("List All"),
),
M("Ushahidi Import", f="ireport", restrict=[ADMIN],
args="ushahidi")
)
# -------------------------------------------------------------------------
def security(self):
""" Security Management System """
ADMIN = current.session.s3.system_roles.ADMIN
return M(c="security")(
M("Incident Reports", c="irs", f="ireport")(
M("New", m="create"),
M("List All"),
M("Open Incidents", vars={"open":1}),
M("Map", m="map"),
M("Timeline", args="timeline"),
M("Import", m="import"),
M("Search", m="search"),
M("Report", m="report",
vars=dict(rows="L1",
cols="category",
fact="datetime",
aggregate="count"))
),
M("Incident Categories", c="irs", f="icategory",
restrict=[ADMIN])(
M("New", m="create"),
M("List All"),
),
M("Facilities", c="org", f="facility")(
M("New", m="create"),
M("List All"),
),
M("Facility Types", c="org", f="facility_type",
restrict=[ADMIN])(
M("New", m="create"),
M("List All"),
),
M("Zones", f="zone")(
M("New", m="create"),
M("List All"),
),
M("Zone Types", f="zone_type", restrict=[ADMIN])(
M("New", m="create"),
M("List All"),
),
M("Personnel", f="staff")(
M("New", m="create"),
M("List All Security-related Staff"),
M("List All Essential Staff", f="essential", m="search"),
),
M("Security Staff Types", f="staff_type", restrict=[ADMIN])(
M("New", m="create"),
M("List All"),
),
#M("Ushahidi Import", c="irs", f="ireport", restrict=[ADMIN],
# args="ushahidi")
)
# -------------------------------------------------------------------------
    def scenario(self):
        """ SCENARIO """
        # Scenarios share the Event module's options menu, so delegate
        # Use EVENT menu
        return self.event()
# -------------------------------------------------------------------------
    def supply(self):
        """ SUPPLY """
        # The supply controller shares the Inventory options menu
        # Use INV menu
        return self.inv()
# -------------------------------------------------------------------------
def survey(self):
""" SURVEY / Survey """
ADMIN = current.session.s3.system_roles.ADMIN
# Do we have a series_id?
series_id = False
vars = Storage()
try:
series_id = int(current.request.args[0])
except:
try:
(dummy, series_id) = current.request.vars["viewing"].split(".")
series_id = int(series_id)
except:
pass
if series_id:
vars.viewing = "survey_complete.%s" % series_id
return M(c="survey")(
M("Assessment Templates", f="template")(
M("Add Assessment Templates", m="create"),
M("List All"),
),
#M("Section", f="section")(
# M("New", args="create"),
# M("List All"),
#),
M("Disaster Assessments", f="series")(
M("Add Disaster Assessments", m="create"),
M("List All"),
),
M("Administration", f="admin", restrict=[ADMIN])(
#M("New", m="create"),
#M("List All"),
M("Import Templates", f="question_list",
m="import", p="create"),
M("Import Template Layout", f="formatter",
m="import", p="create"),
M("Import Completed Assessment Forms", f="complete",
m="import", p="create", vars=vars, check=series_id),
),
)
# -------------------------------------------------------------------------
def member(self):
""" Membership Management """
return M(c="member")(
M("Members", f="membership")(
M("Add Member", m="create"),
M("List All"),
M("Search", m="search"),
M("Report", m="report"),
M("Import", f="person", m="import"),
),
M("Membership Types", f="membership_type")(
M("Add Membership Type", m="create"),
M("List All"),
#M("Search", m="search"),
#M("Import", m="import"),
),
)
# -------------------------------------------------------------------------
def mpr(self):
""" MPR / Missing Person Registry """
return M(c="mpr")(
M("Missing Persons", f="person")(
M("New", m="create"),
M("Search", f="index"),
M("List All"),
),
)
# -------------------------------------------------------------------------
def msg(self):
""" MSG / Messaging """
ADMIN = current.session.s3.system_roles.ADMIN
if current.request.function in ("sms_outbound_gateway",
"email_channel",
"sms_modem_channel",
"sms_smtp_channel",
"sms_webapi_channel",
"tropo_channel",
"twitter_channel"):
return self.admin()
settings_messaging = self.settings_messaging()
return M(c="msg")(
M("Compose", f="compose"),
M("InBox", f="inbox")(
M("Email", f="email_inbox"),
M("RSS", f="rss"),
M("SMS", f="sms_inbox"),
M("Twitter", f="twitter_inbox"),
),
M("Outbox", f="outbox")(
M("Email", f="email_outbox"),
M("SMS", f="sms_outbox"),
M("Twitter", f="twitter_outbox"),
),
M("Message Log", f="message"),
M("Distribution groups", f="group")(
M("List/Add", f="group"),
M("Group Memberships", f="group_membership"),
),
M("Twitter Search", f="twitter_result")(
M("Search Queries", f="twitter_search"),
M("Results", f="twitter_result"),
# @ToDo KeyGraph Results
),
M("Administration", restrict=[ADMIN])(settings_messaging)
)
# -------------------------------------------------------------------------
def org(self):
""" ORG / Organization Registry """
ADMIN = current.session.s3.system_roles.ADMIN
SECTORS = "Clusters" if current.deployment_settings.get_ui_label_cluster() \
else "Sectors"
return M(c="org")(
M("Organizations", f="organisation")(
M("Add Organization", m="create"),
M("List All"),
M("Search", m="search"),
M("Import", m="import")
),
M("Offices", f="office")(
M("New", m="create"),
M("List All"),
M("Map", m="map"),
#M("Search", m="search"),
M("Import", m="import")
),
M("Facilities", f="facility")(
M("New", m="create"),
M("List All"),
M("Map", m="map"),
M("Search", m="search"),
M("Import", m="import")
),
M("Organization Types", f="organisation_type",
restrict=[ADMIN])(
M("New", m="create"),
M("List All"),
),
M("Office Types", f="office_type",
restrict=[ADMIN])(
M("New", m="create"),
M("List All"),
),
M("Facility Types", f="facility_type",
restrict=[ADMIN])(
M("New", m="create"),
M("List All"),
),
M(SECTORS, f="sector", restrict=[ADMIN])(
M("New", m="create"),
M("List All"),
),
)
# -------------------------------------------------------------------------
def patient(self):
""" PATIENT / Patient Tracking """
return M(c="patient")(
M("Patients", f="patient")(
M("New", m="create"),
M("List All"),
M("Search", m="search")
)
)
# -------------------------------------------------------------------------
def pr(self):
""" PR / Person Registry """
ADMIN = current.session.s3.system_roles.ADMIN
return M(c="pr", restrict=ADMIN)(
M("Person", f="person")(
M("Add Person", m="create"),
M("Search", f="index"),
M("List All"),
),
M("Groups", f="group")(
M("New", m="create"),
M("List All"),
),
)
# -------------------------------------------------------------------------
def proc(self):
""" PROC / Procurement """
return M(c="proc")(
M("Home", f="index"),
M("Procurement Plans", f="plan")(
M("New", m="create"),
M("List All"),
#M("Search", m="search"),
),
M("Suppliers", f="supplier")(
M("New", m="create"),
M("List All"),
#M("Search", m="search")
),
)
# -------------------------------------------------------------------------
def project(self):
""" PROJECT / Project Tracking & Management """
settings = current.deployment_settings
#activities = settings.get_project_activities()
community = settings.get_project_community()
if community:
IMPORT = "Import Project Communities"
else:
IMPORT = "Import Project Locations"
menu = M(c="project")
if settings.get_project_mode_3w():
if community:
menu(
M("Projects", f="project")(
M("New", m="create"),
M("List All"),
M("Search", m="search"),
),
M("Communities", f="location")(
# Better created from tab (otherwise Activity Type filter won't work)
#M("New", m="create"),
M("List All"),
M("Map", m="map"),
M("Search", m="search"),
M("List All Community Contacts", f="location_contact"),
M("Search Community Contacts", f="location_contact",
m="search"),
),
)
else:
menu(
M("Projects", f="project")(
M("New", m="create"),
M("List All"),
M("Map", f="location", m="map"),
M("Search", m="search"),
)
)
stats = lambda i: settings.has_module("stats")
menu(
M("Reports", f="location", m="report")(
M("3W", f="location", m="report"),
M("Beneficiaries", f="beneficiary", m="report",
check = stats,
),
M("Funding", f="organisation", m="report"),
),
M("Import", f="project", m="import", p="create")(
M("Import Projects", m="import", p="create"),
M("Import Project Organizations", f="organisation",
m="import", p="create"),
M(IMPORT, f="location",
m="import", p="create"),
),
M("Partner Organizations", f="partners")(
M("New", m="create"),
M("List All"),
M("Search", m="search"),
M("Import", m="import", p="create"),
),
M("Themes", f="theme")(
M("New", m="create"),
M("List All"),
),
M("Activity Types", f="activity_type")(
M("New", m="create"),
M("List All"),
#M("Search", m="search")
),
M("Beneficiary Types", f="beneficiary_type",
check = stats,)(
M("New", m="create"),
M("List All"),
),
M("Demographics", f="demographic",
check = stats,)(
M("New", m="create"),
M("List All"),
),
)
if settings.get_project_mode_drr():
menu(
M("Hazards", f="hazard")(
M("New", m="create"),
M("List All"),
)
)
# if settings.get_project_sectors():
# menu(
# M("Sectors", c="org", f="sector")(
# M("New", m="create"),
# M("List All"),
# )
# )
elif settings.get_project_mode_task():
menu(
M("Projects", f="project")(
M("New", m="create"),
M("List All"),
M("Open Tasks for Project", vars={"tasks":1}),
),
M("Tasks", f="task")(
M("New", m="create"),
M("Search"),
),
)
if current.auth.s3_has_role("STAFF"):
ADMIN = current.session.s3.system_roles.ADMIN
menu(
M("Daily Work", f="time")(
M("My Logged Hours", vars={"mine":1}),
M("My Open Tasks", f="task", vars={"mine":1}),
),
M("Admin", restrict=[ADMIN])(
M("Activity Types", f="activity_type"),
M("Import Tasks", f="task", m="import", p="create"),
),
M("Reports", f="report")(
M("Activity Report", f="activity", m="report2"),
M("Last Week's Work", f="time", m="report2",
vars=Storage(rows="person_id",
cols="day",
fact="sum(hours)",
week=1)),
M("Last Month's Work", f="time", m="report2",
vars=Storage(rows="person_id",
cols="week",
fact="sum(hours)",
month=1)),
M("Project Time Report", f="time", m="report2"),
),
)
else:
menu(
M("Projects", f="project")(
M("New", m="create"),
M("List All"),
M("Search", m="search"),
M("Import", m="import", p="create"),
),
)
return menu
# -------------------------------------------------------------------------
def req(self):
""" REQ / Request Management """
ADMIN = current.session.s3.system_roles.ADMIN
settings = current.deployment_settings
use_commit = lambda i: settings.get_req_use_commit()
req_items = lambda i: "Stock" in settings.get_req_req_type()
req_skills = lambda i: "People" in settings.get_req_req_type()
return M(c="req")(
M("Requests", f="req")(
M("New", m="create"),
M("List All"),
M("List Recurring Requests", f="req_template"),
M("Map", m="map"),
M("Report", m="report"),
M("Search All Requested Items", f="req_item",
m="search", check=req_skills),
M("Search All Requested Skills", f="req_skill",
m="search", check=req_skills),
),
M("Commitments", f="commit", check=use_commit)(
M("List All")
),
M("Items", c="supply", f="item")(
M("New", m="create"),
M("List All"),
M("Search", m="search"),
M("Report", m="report"),
M("Import", m="import", p="create"),
),
# Catalog Items moved to be next to the Item Categories
#M("Catalog Items", c="supply", f="catalog_item")(
#M("New", m="create"),
#M("List All"),
#M("Search", m="search"),
#),
M("Catalogs", c="supply", f="catalog")(
M("New", m="create"),
M("List All"),
#M("Search", m="search"),
),
M("Item Categories", c="supply", f="item_category",
restrict=[ADMIN])(
M("New", m="create"),
M("List All"),
),
)
# -------------------------------------------------------------------------
def stats(self):
""" Statistics """
return M(c="stats")(
M("Demographics", f="demographic")(
M("New", m="create"),
M("List All"),
#M("Search", m="search"),
),
M("Demographic Data", f="demographic_data")(
M("New", m="create"),
M("Import", m="import"),
M("List All"),
#M("Search", m="search"),
),
)
# -------------------------------------------------------------------------
    def sync(self):
        """ SYNC menu """
        # Synchronization is administered from the Admin options menu
        # Use admin menu
        return self.admin()
# -------------------------------------------------------------------------
def tour(self):
""" Guided Tour """
ADMIN = current.session.s3.system_roles.ADMIN
return M(c="tour")(
M("Configuration", f="config", restrict=[ADMIN])(
M("List All"),
M("Import", m="import", restrict=[ADMIN]),
),
M("Detail", f="details", restrict=[ADMIN]),
M("User", f="user", restrict=[ADMIN]),
)
# -------------------------------------------------------------------------
def transport(self):
""" TRANSPORT """
ADMIN = current.session.s3.system_roles.ADMIN
return M(c="transport")(
M("Airports", f="airport")(
M("New", m="create"),
M("Import", m="import", restrict=[ADMIN]),
M("List All"),
M("Map", m="map"),
#M("Search", m="search"),
),
M("Seaports", f="seaport")(
M("New", m="create"),
M("Import", m="import", restrict=[ADMIN]),
M("List All"),
M("Map", m="map"),
#M("Search", m="search"),
),
)
# -------------------------------------------------------------------------
def vehicle(self):
""" VEHICLE / Vehicle Tracking """
return M(c="vehicle")(
M("Vehicles", f="vehicle")(
M("New", m="create"),
M("List All"),
M("Map", m="map"),
M("Search", m="search"),
),
M("Vehicle Types", f="item")(
M("New", m="create"),
M("List All"),
M("Search", m="search"),
),
)
# -------------------------------------------------------------------------
def vulnerability(self):
""" Vulnerability """
return M(c="vulnerability")(
M("Indicators", f="indicator")(
M("New", m="create"),
M("List All"),
#M("Search", m="search"),
),
M("Data", f="data")(
M("New", m="create"),
M("Import", m="import"),
M("List All"),
#M("Search", m="search"),
),
)
# -------------------------------------------------------------------------
@classmethod
def settings_messaging(cls):
""" Messaging settings menu items:
These items are used in multiple menus, but each item instance can
always only belong to one parent, so we need to re-instantiate
with the same parameters, and therefore this is defined as a
function here.
"""
return [
M("Email Settings", c="msg", f="email_channel"),
M("Parsing Settings", c="msg", f="parser"),
M("RSS Settings", c="msg", f="rss_channel"),
M("SMS Gateway Settings", c="msg", f="sms_outbound_gateway",
args=[1], m="update"),
M("Mobile Commons SMS Settings", c="msg", f="mcommons_channel"),
M("Twilio SMS Settings", c="msg", f="twilio_channel"),
M("Twitter Settings", c="msg", f="twitter_channel",
args=[1], m="update")
]
# -------------------------------------------------------------------------
@classmethod
def breadcrumbs(cls):
""" Breadcrumbs from the current options menu """
# Configure the layout:
layout = S3BreadcrumbsLayout
request = current.request
controller = request.controller
function = request.function
all_modules = current.deployment_settings.modules
# Start with a link to the homepage - always:
breadcrumbs = layout()(
layout(all_modules["default"].name_nice)
)
# Append the current module's homepage - always:
# @note: this may give a breadcrumb for which there's no menu item
# and should therefore perhaps be replaced by a real path-check in
# the main menu?
if controller != "default":
try:
breadcrumbs(
layout(all_modules[controller].name_nice, c=controller)
)
except:
# Module not defined
pass
# This checks the path in the options menu, omitting the top-level item
# (because that's the menu itself which doesn't have a linked label):
menu = current.menu.options
if menu and function != "index":
branch = menu.branch()
if branch:
path = branch.path()
if len(path) > 1:
for item in path[1:]:
breadcrumbs(
layout(item.label,
c=item.get("controller"),
f=item.get("function"),
args=item.args,
# Should we retain the request vars in case
# the item has no vars? Or shall we merge them
# in any case? Didn't see the use-case yet
# anywhere...
vars=item.vars))
return breadcrumbs
#-----------------------------------------------------------------------
# END =========================================================================
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.